summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorMark Benvenuto <mark.benvenuto@mongodb.com>2016-05-28 17:55:12 -0400
committerMark Benvenuto <mark.benvenuto@mongodb.com>2016-05-28 17:55:12 -0400
commit6dcdd23dd37ef12c87e71cf59ef01cd82432efe0 (patch)
treec8cfb5acb62c80f375bc37e7d4350382deea6a37 /src/mongo/db
parentd4ac5673ea3f6cef4ce9dbcec90e31813997a528 (diff)
downloadmongo-6dcdd23dd37ef12c87e71cf59ef01cd82432efe0.tar.gz
SERVER-23971 Clang-Format code
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/auth/action_set.cpp8
-rw-r--r--src/mongo/db/auth/auth_decorations.cpp2
-rw-r--r--src/mongo/db/auth/auth_index_d.cpp24
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp16
-rw-r--r--src/mongo/db/auth/authorization_manager_global.cpp3
-rw-r--r--src/mongo/db/auth/authorization_manager_test.cpp128
-rw-r--r--src/mongo/db/auth/authorization_session.cpp8
-rw-r--r--src/mongo/db/auth/authorization_session_test.cpp70
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.cpp3
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp30
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp3
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.h2
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.cpp50
-rw-r--r--src/mongo/db/auth/authz_session_external_state_server_common.cpp3
-rw-r--r--src/mongo/db/auth/native_sasl_authentication_session.cpp2
-rw-r--r--src/mongo/db/auth/privilege_parser_test.cpp60
-rw-r--r--src/mongo/db/auth/role_graph.cpp47
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp4
-rw-r--r--src/mongo/db/auth/sasl_authentication_session.cpp2
-rw-r--r--src/mongo/db/auth/sasl_options.cpp44
-rw-r--r--src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp50
-rw-r--r--src/mongo/db/auth/security_file.cpp8
-rw-r--r--src/mongo/db/auth/security_key.cpp15
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.cpp6
-rw-r--r--src/mongo/db/auth/user_document_parser_test.cpp233
-rw-r--r--src/mongo/db/auth/user_management_commands_parser.cpp7
-rw-r--r--src/mongo/db/auth/user_management_commands_parser.h2
-rw-r--r--src/mongo/db/background.cpp6
-rw-r--r--src/mongo/db/background.h2
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp2
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp6
-rw-r--r--src/mongo/db/catalog/collection.cpp11
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp10
-rw-r--r--src/mongo/db/catalog/collection_options_test.cpp6
-rw-r--r--src/mongo/db/catalog/cursor_manager.cpp2
-rw-r--r--src/mongo/db/catalog/database.cpp4
-rw-r--r--src/mongo/db/catalog/database_holder.cpp6
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp4
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp34
-rw-r--r--src/mongo/db/catalog/index_create.cpp2
-rw-r--r--src/mongo/db/catalog/index_key_validate_test.cpp3
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp3
-rw-r--r--src/mongo/db/clientlistplugin.cpp3
-rw-r--r--src/mongo/db/cloner.cpp12
-rw-r--r--src/mongo/db/cloner.h2
-rw-r--r--src/mongo/db/commands.cpp10
-rw-r--r--src/mongo/db/commands/apply_ops_cmd.cpp16
-rw-r--r--src/mongo/db/commands/clone.cpp2
-rw-r--r--src/mongo/db/commands/clone_collection.cpp4
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp13
-rw-r--r--src/mongo/db/commands/copydb.cpp21
-rw-r--r--src/mongo/db/commands/copydb_common.cpp4
-rw-r--r--src/mongo/db/commands/copydb_start_commands.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp9
-rw-r--r--src/mongo/db/commands/distinct.cpp13
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp2
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp13
-rw-r--r--src/mongo/db/commands/find_cmd.cpp4
-rw-r--r--src/mongo/db/commands/generic.cpp9
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp6
-rw-r--r--src/mongo/db/commands/group_cmd.cpp4
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp6
-rw-r--r--src/mongo/db/commands/index_filter_commands.h2
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp28
-rw-r--r--src/mongo/db/commands/list_indexes.cpp4
-rw-r--r--src/mongo/db/commands/mr.cpp17
-rw-r--r--src/mongo/db/commands/mr_test.cpp6
-rw-r--r--src/mongo/db/commands/oplog_note.cpp4
-rw-r--r--src/mongo/db/commands/parallel_collection_scan.cpp5
-rw-r--r--src/mongo/db/commands/parameters.cpp19
-rw-r--r--src/mongo/db/commands/pipeline_command.cpp6
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp6
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp15
-rw-r--r--src/mongo/db/commands/rename_collection_cmd.cpp4
-rw-r--r--src/mongo/db/commands/server_status.cpp2
-rw-r--r--src/mongo/db/commands/server_status.h2
-rw-r--r--src/mongo/db/commands/test_commands.cpp12
-rw-r--r--src/mongo/db/commands/top_command.cpp2
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp109
-rw-r--r--src/mongo/db/commands/user_management_commands_common.cpp19
-rw-r--r--src/mongo/db/commands/write_commands/write_commands_common.cpp2
-rw-r--r--src/mongo/db/commands/write_commands/write_commands_common.h2
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp2
-rw-r--r--src/mongo/db/concurrency/lock_manager_defs.h2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp2
-rw-r--r--src/mongo/db/curop.cpp3
-rw-r--r--src/mongo/db/curop.h2
-rw-r--r--src/mongo/db/curop_metrics.cpp2
-rw-r--r--src/mongo/db/db.cpp13
-rw-r--r--src/mongo/db/db.h2
-rw-r--r--src/mongo/db/db_raii.cpp2
-rw-r--r--src/mongo/db/dbcommands.cpp19
-rw-r--r--src/mongo/db/dbhelpers.cpp8
-rw-r--r--src/mongo/db/dbhelpers.h2
-rw-r--r--src/mongo/db/dbwebserver.cpp7
-rw-r--r--src/mongo/db/exec/and_hash.cpp2
-rw-r--r--src/mongo/db/exec/and_hash.h2
-rw-r--r--src/mongo/db/exec/cached_plan.cpp3
-rw-r--r--src/mongo/db/exec/cached_plan.h2
-rw-r--r--src/mongo/db/exec/collection_scan.cpp10
-rw-r--r--src/mongo/db/exec/delete.cpp2
-rw-r--r--src/mongo/db/exec/geo_near.cpp2
-rw-r--r--src/mongo/db/exec/geo_near.h2
-rw-r--r--src/mongo/db/exec/keep_mutations.h2
-rw-r--r--src/mongo/db/exec/limit.h2
-rw-r--r--src/mongo/db/exec/multi_plan.cpp4
-rw-r--r--src/mongo/db/exec/multi_plan.h4
-rw-r--r--src/mongo/db/exec/near.h2
-rw-r--r--src/mongo/db/exec/pipeline_proxy.h2
-rw-r--r--src/mongo/db/exec/projection_exec_test.cpp4
-rw-r--r--src/mongo/db/exec/sort.cpp2
-rw-r--r--src/mongo/db/exec/sort.h2
-rw-r--r--src/mongo/db/exec/sort_key_generator.h2
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp12
-rw-r--r--src/mongo/db/exec/subplan.cpp2
-rw-r--r--src/mongo/db/exec/text.cpp4
-rw-r--r--src/mongo/db/exec/text_match.cpp2
-rw-r--r--src/mongo/db/exec/update.cpp24
-rw-r--r--src/mongo/db/exec/working_set.h2
-rw-r--r--src/mongo/db/exec/working_set_common.cpp4
-rw-r--r--src/mongo/db/exec/working_set_test.cpp2
-rw-r--r--src/mongo/db/field_parser_test.cpp24
-rw-r--r--src/mongo/db/ftdc/compressor_test.cpp201
-rw-r--r--src/mongo/db/ftdc/file_manager.cpp10
-rw-r--r--src/mongo/db/ftdc/file_manager_test.cpp94
-rw-r--r--src/mongo/db/ftdc/file_reader.cpp3
-rw-r--r--src/mongo/db/ftdc/file_writer_test.cpp86
-rw-r--r--src/mongo/db/ftdc/ftdc_test.cpp4
-rw-r--r--src/mongo/db/ftdc/util.cpp7
-rw-r--r--src/mongo/db/ftdc/varint.h2
-rw-r--r--src/mongo/db/fts/fts_element_iterator.cpp3
-rw-r--r--src/mongo/db/fts/fts_index_format.cpp7
-rw-r--r--src/mongo/db/fts/fts_index_format_test.cpp32
-rw-r--r--src/mongo/db/fts/fts_language.cpp9
-rw-r--r--src/mongo/db/fts/fts_language.h2
-rw-r--r--src/mongo/db/fts/fts_language_test.cpp2
-rw-r--r--src/mongo/db/fts/fts_matcher.cpp2
-rw-r--r--src/mongo/db/fts/fts_query_impl.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_impl_test.cpp25
-rw-r--r--src/mongo/db/fts/fts_spec.cpp14
-rw-r--r--src/mongo/db/fts/fts_spec.h2
-rw-r--r--src/mongo/db/fts/fts_spec_legacy.cpp4
-rw-r--r--src/mongo/db/fts/fts_spec_test.cpp22
-rw-r--r--src/mongo/db/geo/big_polygon_test.cpp155
-rw-r--r--src/mongo/db/geo/geoparser.cpp5
-rw-r--r--src/mongo/db/geo/geoparser_test.cpp76
-rw-r--r--src/mongo/db/geo/hash.cpp17
-rw-r--r--src/mongo/db/geo/hash.h2
-rw-r--r--src/mongo/db/geo/hash_test.cpp8
-rw-r--r--src/mongo/db/geo/r2_region_coverer.cpp2
-rw-r--r--src/mongo/db/geo/r2_region_coverer_test.cpp7
-rw-r--r--src/mongo/db/geo/shapes.cpp2
-rw-r--r--src/mongo/db/geo/shapes.h2
-rw-r--r--src/mongo/db/hasher_test.cpp5
-rw-r--r--src/mongo/db/index/2d_access_method.cpp2
-rw-r--r--src/mongo/db/index/btree_access_method.h2
-rw-r--r--src/mongo/db/index/btree_key_generator.cpp6
-rw-r--r--src/mongo/db/index/expression_keys_private.cpp2
-rw-r--r--src/mongo/db/index/expression_keys_private.h2
-rw-r--r--src/mongo/db/index/expression_params.cpp12
-rw-r--r--src/mongo/db/index/expression_params.h2
-rw-r--r--src/mongo/db/index/external_key_generator.cpp2
-rw-r--r--src/mongo/db/index/hash_access_method.cpp2
-rw-r--r--src/mongo/db/index/hash_access_method.h2
-rw-r--r--src/mongo/db/index/index_access_method.cpp6
-rw-r--r--src/mongo/db/index/index_descriptor.h2
-rw-r--r--src/mongo/db/index/s2_access_method.cpp40
-rw-r--r--src/mongo/db/index/s2_key_generator_test.cpp12
-rw-r--r--src/mongo/db/index_rebuilder.cpp2
-rw-r--r--src/mongo/db/initialize_server_global_state.cpp33
-rw-r--r--src/mongo/db/instance.cpp15
-rw-r--r--src/mongo/db/jsobj.h12
-rw-r--r--src/mongo/db/keypattern.cpp3
-rw-r--r--src/mongo/db/keypattern_test.cpp6
-rw-r--r--src/mongo/db/matcher/expression.cpp2
-rw-r--r--src/mongo/db/matcher/expression.h2
-rw-r--r--src/mongo/db/matcher/expression_algo_test.cpp23
-rw-r--r--src/mongo/db/matcher/expression_array.h2
-rw-r--r--src/mongo/db/matcher/expression_geo.cpp19
-rw-r--r--src/mongo/db/matcher/expression_geo_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_leaf.cpp4
-rw-r--r--src/mongo/db/matcher/expression_leaf.h2
-rw-r--r--src/mongo/db/matcher/expression_leaf_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_parser.cpp4
-rw-r--r--src/mongo/db/matcher/expression_parser.h3
-rw-r--r--src/mongo/db/matcher/expression_parser_array_test.cpp152
-rw-r--r--src/mongo/db/matcher/expression_parser_leaf_test.cpp583
-rw-r--r--src/mongo/db/matcher/expression_serialization_test.cpp61
-rw-r--r--src/mongo/db/matcher/expression_text.cpp6
-rw-r--r--src/mongo/db/matcher/expression_text_base.cpp6
-rw-r--r--src/mongo/db/matcher/expression_tree.cpp2
-rw-r--r--src/mongo/db/matcher/expression_tree_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_where.cpp2
-rw-r--r--src/mongo/db/matcher/matchable.cpp4
-rw-r--r--src/mongo/db/matcher/matcher.cpp2
-rw-r--r--src/mongo/db/matcher/path.cpp4
-rw-r--r--src/mongo/db/mongod_options.cpp342
-rw-r--r--src/mongo/db/op_observer.cpp4
-rw-r--r--src/mongo/db/operation_context_impl.cpp2
-rw-r--r--src/mongo/db/operation_context_noop.h2
-rw-r--r--src/mongo/db/ops/field_checker.cpp3
-rw-r--r--src/mongo/db/ops/insert.cpp14
-rw-r--r--src/mongo/db/ops/log_builder.cpp12
-rw-r--r--src/mongo/db/ops/log_builder_test.cpp27
-rw-r--r--src/mongo/db/ops/modifier_add_to_set.cpp17
-rw-r--r--src/mongo/db/ops/modifier_bit.cpp18
-rw-r--r--src/mongo/db/ops/modifier_compare.cpp3
-rw-r--r--src/mongo/db/ops/modifier_current_date.cpp6
-rw-r--r--src/mongo/db/ops/modifier_inc.cpp23
-rw-r--r--src/mongo/db/ops/modifier_object_replace.cpp9
-rw-r--r--src/mongo/db/ops/modifier_pop.cpp13
-rw-r--r--src/mongo/db/ops/modifier_pop_test.cpp2
-rw-r--r--src/mongo/db/ops/modifier_pull.cpp3
-rw-r--r--src/mongo/db/ops/modifier_pull_all.cpp13
-rw-r--r--src/mongo/db/ops/modifier_push.cpp29
-rw-r--r--src/mongo/db/ops/modifier_push_sorter.h2
-rw-r--r--src/mongo/db/ops/modifier_push_test.cpp34
-rw-r--r--src/mongo/db/ops/modifier_rename.cpp21
-rw-r--r--src/mongo/db/ops/modifier_set.cpp3
-rw-r--r--src/mongo/db/ops/modifier_unset.cpp3
-rw-r--r--src/mongo/db/ops/parsed_delete.cpp2
-rw-r--r--src/mongo/db/ops/parsed_update.h2
-rw-r--r--src/mongo/db/ops/path_support.cpp9
-rw-r--r--src/mongo/db/ops/path_support_test.cpp16
-rw-r--r--src/mongo/db/ops/update.cpp3
-rw-r--r--src/mongo/db/ops/update.h2
-rw-r--r--src/mongo/db/ops/update_driver.cpp18
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.cpp4
-rw-r--r--src/mongo/db/ops/update_request.h2
-rw-r--r--src/mongo/db/ops/update_result.h2
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp7
-rw-r--r--src/mongo/db/ops/write_ops_parsers.cpp23
-rw-r--r--src/mongo/db/ops/write_ops_parsers.h2
-rw-r--r--src/mongo/db/ops/write_ops_parsers_test.cpp68
-rw-r--r--src/mongo/db/pipeline/accumulator.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator.h2
-rw-r--r--src/mongo/db/pipeline/document_internal.h4
-rw-r--r--src/mongo/db/pipeline/document_source.h2
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_geo_near.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_graph_lookup.cpp36
-rw-r--r--src/mongo/db/pipeline/document_source_lookup.cpp59
-rw-r--r--src/mongo/db/pipeline/document_source_match.cpp64
-rw-r--r--src/mongo/db/pipeline/document_source_merge_cursors.cpp11
-rw-r--r--src/mongo/db/pipeline/document_source_out.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_redact.cpp3
-rw-r--r--src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp12
-rw-r--r--src/mongo/db/pipeline/document_source_sort.cpp3
-rw-r--r--src/mongo/db/pipeline/document_source_test.cpp70
-rw-r--r--src/mongo/db/pipeline/document_source_unwind.cpp3
-rw-r--r--src/mongo/db/pipeline/expression.cpp172
-rw-r--r--src/mongo/db/pipeline/expression.h16
-rw-r--r--src/mongo/db/pipeline/expression_test.cpp216
-rw-r--r--src/mongo/db/pipeline/field_path_test.cpp2
-rw-r--r--src/mongo/db/pipeline/lookup_set_cache.h8
-rw-r--r--src/mongo/db/pipeline/lookup_set_cache_test.cpp2
-rw-r--r--src/mongo/db/pipeline/pipeline.cpp3
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp13
-rw-r--r--src/mongo/db/pipeline/value.cpp2
-rw-r--r--src/mongo/db/pipeline/value.h3
-rw-r--r--src/mongo/db/pipeline/value_internal.h4
-rw-r--r--src/mongo/db/query/canonical_query_test.cpp170
-rw-r--r--src/mongo/db/query/collation/collation_serializer_test.cpp135
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu.cpp136
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu_decoration.cpp5
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu_test.cpp468
-rw-r--r--src/mongo/db/query/collation/collator_factory_mock.cpp2
-rw-r--r--src/mongo/db/query/count_request_test.cpp56
-rw-r--r--src/mongo/db/query/cursor_response.cpp24
-rw-r--r--src/mongo/db/query/cursor_response_test.cpp158
-rw-r--r--src/mongo/db/query/explain.cpp2
-rw-r--r--src/mongo/db/query/expression_index.cpp2
-rw-r--r--src/mongo/db/query/find.cpp12
-rw-r--r--src/mongo/db/query/get_executor.cpp13
-rw-r--r--src/mongo/db/query/get_executor.h10
-rw-r--r--src/mongo/db/query/getmore_request.cpp11
-rw-r--r--src/mongo/db/query/getmore_request_test.cpp49
-rw-r--r--src/mongo/db/query/index_bounds_builder.h2
-rw-r--r--src/mongo/db/query/index_bounds_builder_test.cpp20
-rw-r--r--src/mongo/db/query/index_bounds_test.cpp6
-rw-r--r--src/mongo/db/query/killcursors_request.cpp4
-rw-r--r--src/mongo/db/query/killcursors_request_test.cpp9
-rw-r--r--src/mongo/db/query/killcursors_response.cpp4
-rw-r--r--src/mongo/db/query/killcursors_response_test.cpp42
-rw-r--r--src/mongo/db/query/lite_parsed_query.cpp31
-rw-r--r--src/mongo/db/query/lite_parsed_query_test.cpp6
-rw-r--r--src/mongo/db/query/parsed_projection_test.cpp8
-rw-r--r--src/mongo/db/query/plan_cache.cpp8
-rw-r--r--src/mongo/db/query/plan_cache.h2
-rw-r--r--src/mongo/db/query/plan_cache_indexability.cpp4
-rw-r--r--src/mongo/db/query/plan_cache_indexability_test.cpp4
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp54
-rw-r--r--src/mongo/db/query/plan_enumerator.cpp5
-rw-r--r--src/mongo/db/query/plan_executor.cpp5
-rw-r--r--src/mongo/db/query/plan_ranker.cpp2
-rw-r--r--src/mongo/db/query/planner_access.cpp2
-rw-r--r--src/mongo/db/query/planner_analysis.cpp4
-rw-r--r--src/mongo/db/query/planner_analysis_test.cpp10
-rw-r--r--src/mongo/db/query/planner_ixselect.cpp2
-rw-r--r--src/mongo/db/query/planner_ixselect_test.cpp2
-rw-r--r--src/mongo/db/query/query_planner.cpp21
-rw-r--r--src/mongo/db/query/query_planner_array_test.cpp78
-rw-r--r--src/mongo/db/query/query_planner_collation_test.cpp18
-rw-r--r--src/mongo/db/query/query_planner_geo_test.cpp299
-rw-r--r--src/mongo/db/query/query_planner_test.cpp122
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp8
-rw-r--r--src/mongo/db/query/query_planner_test_lib.cpp2
-rw-r--r--src/mongo/db/query/query_planner_test_lib.h2
-rw-r--r--src/mongo/db/query/query_planner_text_test.cpp123
-rw-r--r--src/mongo/db/query/query_solution.h2
-rw-r--r--src/mongo/db/query/query_solution_test.cpp6
-rw-r--r--src/mongo/db/query/stage_builder.cpp6
-rw-r--r--src/mongo/db/range_arithmetic.h2
-rw-r--r--src/mongo/db/range_deleter.cpp2
-rw-r--r--src/mongo/db/repair_database.cpp13
-rw-r--r--src/mongo/db/repl/applier_test.cpp64
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.cpp5
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.h2
-rw-r--r--src/mongo/db/repl/bgsync.cpp13
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp247
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp4
-rw-r--r--src/mongo/db/repl/collection_cloner.h2
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp12
-rw-r--r--src/mongo/db/repl/data_replicator.cpp4
-rw-r--r--src/mongo/db/repl/data_replicator.h4
-rw-r--r--src/mongo/db/repl/data_replicator_test.cpp387
-rw-r--r--src/mongo/db/repl/database_cloner.cpp32
-rw-r--r--src/mongo/db/repl/database_cloner.h2
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp115
-rw-r--r--src/mongo/db/repl/database_task.h2
-rw-r--r--src/mongo/db/repl/elect_cmd_runner_test.cpp41
-rw-r--r--src/mongo/db/repl/freshness_checker_test.cpp284
-rw-r--r--src/mongo/db/repl/freshness_scanner.cpp5
-rw-r--r--src/mongo/db/repl/freshness_scanner_test.cpp39
-rw-r--r--src/mongo/db/repl/is_master_response.cpp27
-rw-r--r--src/mongo/db/repl/isself.cpp6
-rw-r--r--src/mongo/db/repl/master_slave.cpp4
-rw-r--r--src/mongo/db/repl/member_config_test.cpp181
-rw-r--r--src/mongo/db/repl/old_update_position_args.cpp6
-rw-r--r--src/mongo/db/repl/oplog.cpp87
-rw-r--r--src/mongo/db/repl/oplog.h2
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp41
-rw-r--r--src/mongo/db/repl/oplog_fetcher_test.cpp21
-rw-r--r--src/mongo/db/repl/oplog_interface_local.cpp4
-rw-r--r--src/mongo/db/repl/oplog_interface_mock.h2
-rw-r--r--src/mongo/db/repl/optime_extract_test.cpp3
-rw-r--r--src/mongo/db/repl/read_concern_args.cpp3
-rw-r--r--src/mongo/db/repl/read_concern_args_test.cpp116
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response.cpp18
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response_test.cpp12
-rw-r--r--src/mongo/db/repl/repl_set_html_summary.cpp2
-rw-r--r--src/mongo/db/repl/repl_settings.h2
-rw-r--r--src/mongo/db/repl/replica_set_config.cpp86
-rw-r--r--src/mongo/db/repl/replica_set_config_checks.cpp46
-rw-r--r--src/mongo/db/repl/replica_set_config_checks_test.cpp414
-rw-r--r--src/mongo/db/repl/replica_set_config_test.cpp1236
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp19
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp74
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp176
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp286
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp16
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp24
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp133
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp114
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp1711
-rw-r--r--src/mongo/db/repl/replication_coordinator_mock.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp36
-rw-r--r--src/mongo/db/repl/replication_executor.cpp4
-rw-r--r--src/mongo/db/repl/replication_executor_test.cpp111
-rw-r--r--src/mongo/db/repl/replication_executor_test_fixture.h2
-rw-r--r--src/mongo/db/repl/replset_commands.cpp11
-rw-r--r--src/mongo/db/repl/replset_web_handler.cpp2
-rw-r--r--src/mongo/db/repl/reporter_test.cpp20
-rw-r--r--src/mongo/db/repl/resync.cpp2
-rw-r--r--src/mongo/db/repl/roll_back_local_operations_test.cpp10
-rw-r--r--src/mongo/db/repl/rollback_checker.cpp94
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp2
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp7
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp38
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp95
-rw-r--r--src/mongo/db/repl/rs_sync.cpp2
-rw-r--r--src/mongo/db/repl/rs_sync.h2
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp6
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp38
-rw-r--r--src/mongo/db/repl/sync_tail.cpp9
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp76
-rw-r--r--src/mongo/db/repl/task_runner.cpp2
-rw-r--r--src/mongo/db/repl/topology_coordinator.h2
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp32
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_test.cpp1584
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp1456
-rw-r--r--src/mongo/db/repl/update_position_args.cpp2
-rw-r--r--src/mongo/db/repl/vote_requester_test.cpp80
-rw-r--r--src/mongo/db/s/check_sharding_index_command.cpp2
-rw-r--r--src/mongo/db/s/cleanup_orphaned_cmd.cpp4
-rw-r--r--src/mongo/db/s/collection_metadata.cpp3
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp45
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp14
-rw-r--r--src/mongo/db/s/collection_sharding_state_test.cpp2
-rw-r--r--src/mongo/db/s/metadata_loader_test.cpp3
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp22
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp43
-rw-r--r--src/mongo/db/s/migration_session_id_test.cpp5
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp127
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp7
-rw-r--r--src/mongo/db/s/move_timing_helper.cpp4
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp19
-rw-r--r--src/mongo/db/s/sharding_state.cpp17
-rw-r--r--src/mongo/db/s/sharding_state_recovery.cpp2
-rw-r--r--src/mongo/db/s/sharding_state_test.cpp10
-rw-r--r--src/mongo/db/s/split_chunk_command.cpp7
-rw-r--r--src/mongo/db/s/start_chunk_clone_request_test.cpp7
-rw-r--r--src/mongo/db/s/type_shard_identity_test.cpp18
-rw-r--r--src/mongo/db/server_options_helpers.cpp220
-rw-r--r--src/mongo/db/server_parameters.h2
-rw-r--r--src/mongo/db/service_context_d.cpp21
-rw-r--r--src/mongo/db/service_context_noop.cpp2
-rw-r--r--src/mongo/db/sorter/sorter.cpp34
-rw-r--r--src/mongo/db/sorter/sorter.h6
-rw-r--r--src/mongo/db/sorter/sorter_test.cpp6
-rw-r--r--src/mongo/db/startup_warnings_common.cpp3
-rw-r--r--src/mongo/db/startup_warnings_mongod.cpp16
-rw-r--r--src/mongo/db/stats/counters.h4
-rw-r--r--src/mongo/db/stats/timer_stats_test.cpp2
-rw-r--r--src/mongo/db/storage/devnull/devnull_init.cpp2
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp2
-rw-r--r--src/mongo/db/storage/key_string.cpp21
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp5
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_harness.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp5
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/hashtab.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.cpp8
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp13
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_commitjob.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal.cpp10
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp13
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/extent.cpp10
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/logfile.cpp8
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_posix.cpp11
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp21
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_access_tracker.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp25
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp17
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp9
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp13
-rw-r--r--src/mongo/db/storage/paths.cpp8
-rw-r--r--src/mongo/db/storage/paths.h4
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp58
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp33
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp19
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp8
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_posix.cpp21
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_test.cpp4
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_windows.cpp11
-rw-r--r--src/mongo/db/storage/storage_engine_metadata.cpp29
-rw-r--r--src/mongo/db/storage/storage_engine_metadata_test.cpp2
-rw-r--r--src/mongo/db/storage/storage_init.cpp8
-rw-r--r--src/mongo/db/storage/storage_options.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp72
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp16
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp33
-rw-r--r--src/mongo/db/ttl.cpp2
-rw-r--r--src/mongo/db/update_index_data.cpp2
-rw-r--r--src/mongo/db/write_concern.cpp8
493 files changed, 11346 insertions, 6947 deletions
diff --git a/src/mongo/db/auth/action_set.cpp b/src/mongo/db/auth/action_set.cpp
index 7d3dc9f1712..924ec1e1439 100644
--- a/src/mongo/db/auth/action_set.cpp
+++ b/src/mongo/db/auth/action_set.cpp
@@ -37,8 +37,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
#include "mongo/util/log.h"
-#include "mongo/util/stringutils.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/stringutils.h"
namespace mongo {
@@ -97,9 +97,9 @@ Status ActionSet::parseActionSetFromString(const std::string& actionsString, Act
}
std::string unrecognizedActionsString;
joinStringDelim(unrecognizedActions, &unrecognizedActionsString, ',');
- return Status(
- ErrorCodes::FailedToParse,
- str::stream() << "Unrecognized action privilege strings: " << unrecognizedActionsString);
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Unrecognized action privilege strings: "
+ << unrecognizedActionsString);
}
Status ActionSet::parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
diff --git a/src/mongo/db/auth/auth_decorations.cpp b/src/mongo/db/auth/auth_decorations.cpp
index 2bd2264e0f9..60b148d1ad0 100644
--- a/src/mongo/db/auth/auth_decorations.cpp
+++ b/src/mongo/db/auth/auth_decorations.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/init.h"
#include "mongo/db/auth/authentication_session.h"
#include "mongo/db/auth/authorization_manager.h"
-#include "mongo/db/auth/authz_manager_external_state.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state.h"
#include "mongo/db/client.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index ac3d38ebf03..8f45cfda0e3 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -61,16 +61,20 @@ std::string v3SystemRolesIndexName;
MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
- v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+ v3SystemUsersKeyPattern = BSON(
+ AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME
+ << 1);
+ v3SystemRolesKeyPattern = BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << "_1");
v3SystemRolesIndexName =
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << "_1");
return Status::OK();
}
@@ -113,12 +117,16 @@ void createSystemIndexes(OperationContext* txn, Collection* collection) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
txn,
BSON("name" << v3SystemUsersIndexName << "ns" << collection->ns().ns() << "key"
- << v3SystemUsersKeyPattern << "unique" << true));
+ << v3SystemUsersKeyPattern
+ << "unique"
+ << true));
} else if (ns == AuthorizationManager::rolesCollectionNamespace) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
txn,
BSON("name" << v3SystemRolesIndexName << "ns" << collection->ns().ns() << "key"
- << v3SystemRolesKeyPattern << "unique" << true));
+ << v3SystemRolesKeyPattern
+ << "unique"
+ << true));
}
}
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index df65782689e..046ed24a1bc 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -69,8 +69,8 @@ using std::vector;
AuthInfo internalSecurity;
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
User* user = new User(UserName("__system", "local"));
user->incrementRefCount(); // Pin this user so the ref count never drops below 1.
@@ -381,7 +381,8 @@ Status AuthorizationManager::_initializeUserFromPrivilegeDocument(User* user,
mongoutils::str::stream() << "User name from privilege document \""
<< userName
<< "\" doesn't match name of provided User \""
- << user->getName().getUser() << "\"",
+ << user->getName().getUser()
+ << "\"",
0);
}
@@ -484,7 +485,8 @@ Status AuthorizationManager::acquireUser(OperationContext* txn,
case schemaVersion24:
status = Status(ErrorCodes::AuthSchemaIncompatible,
mongoutils::str::stream()
- << "Authorization data schema version " << schemaVersion24
+ << "Authorization data schema version "
+ << schemaVersion24
<< " not supported after MongoDB version 2.6.");
break;
}
@@ -669,7 +671,8 @@ StatusWith<UserName> extractUserNameFromIdString(StringData idstr) {
return StatusWith<UserName>(ErrorCodes::FailedToParse,
mongoutils::str::stream()
<< "_id entries for user documents must be of "
- "the form <dbname>.<username>. Found: " << idstr);
+ "the form <dbname>.<username>. Found: "
+ << idstr);
}
return StatusWith<UserName>(
UserName(idstr.substr(splitPoint + 1), idstr.substr(0, splitPoint)));
@@ -702,7 +705,8 @@ void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
if (!userName.isOK()) {
warning() << "Invalidating user cache based on user being updated failed, will "
- "invalidate the entire cache instead: " << userName.getStatus() << endl;
+ "invalidate the entire cache instead: "
+ << userName.getStatus() << endl;
invalidateUserCache();
return;
}
diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp
index 2fc20deef25..9dd114247cb 100644
--- a/src/mongo/db/auth/authorization_manager_global.cpp
+++ b/src/mongo/db/auth/authorization_manager_global.cpp
@@ -50,7 +50,8 @@ public:
MONGO_INITIALIZER_GENERAL(AuthzSchemaParameter,
MONGO_NO_PREREQUISITES,
- ("BeginStartupOptionParsing"))(InitializerContext*) {
+ ("BeginStartupOptionParsing"))
+(InitializerContext*) {
new AuthzVersionParameter(ServerParameterSet::getGlobal(), authSchemaVersionServerParameter);
return Status::OK();
}
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index 2faf6d0fe10..cd2b83fa6b1 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -34,10 +34,10 @@
#include "mongo/bson/mutable/document.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authz_session_external_state_mock.h"
-#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/authz_session_external_state_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
@@ -175,36 +175,38 @@ public:
TEST_F(AuthorizationManagerTest, testAcquireV2User) {
OperationContextNoop txn;
- ASSERT_OK(
- externalState->insertPrivilegeDocument(&txn,
- BSON("_id"
- << "admin.v2read"
- << "user"
- << "v2read"
- << "db"
- << "test"
- << "credentials" << BSON("MONGODB-CR"
- << "password")
- << "roles" << BSON_ARRAY(BSON("role"
- << "read"
- << "db"
- << "test"))),
- BSONObj()));
- ASSERT_OK(
- externalState->insertPrivilegeDocument(&txn,
- BSON("_id"
- << "admin.v2cluster"
- << "user"
- << "v2cluster"
- << "db"
- << "admin"
- << "credentials" << BSON("MONGODB-CR"
- << "password")
- << "roles" << BSON_ARRAY(BSON("role"
- << "clusterAdmin"
- << "db"
- << "admin"))),
- BSONObj()));
+ ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2read"
+ << "user"
+ << "v2read"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "read"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2cluster"
+ << "user"
+ << "v2cluster"
+ << "db"
+ << "admin"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "clusterAdmin"
+ << "db"
+ << "admin"))),
+ BSONObj()));
User* v2read;
ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
@@ -260,13 +262,13 @@ public:
private:
Status _getUserDocument(OperationContext* txn, const UserName& userName, BSONObj* userDoc) {
- Status status =
- findOne(txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB()),
- userDoc);
+ Status status = findOne(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
status = Status(ErrorCodes::UserNotFound,
mongoutils::str::stream() << "Could not find user "
@@ -301,27 +303,33 @@ public:
TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
OperationContextNoop txn;
- ASSERT_OK(externalState->insertPrivilegeDocument(
- &txn,
- BSON("_id"
- << "admin.myUser"
- << "user"
- << "myUser"
- << "db"
- << "test"
- << "credentials" << BSON("MONGODB-CR"
- << "password") << "roles" << BSON_ARRAY(BSON("role"
- << "myRole"
- << "db"
- << "test"))
- << "inheritedPrivileges" << BSON_ARRAY(BSON("resource" << BSON("db"
- << "test"
- << "collection"
- << "") << "actions"
- << BSON_ARRAY("find"
- << "fakeAction"
- << "insert")))),
- BSONObj()));
+ ASSERT_OK(
+ externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.myUser"
+ << "user"
+ << "myUser"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "myRole"
+ << "db"
+ << "test"))
+ << "inheritedPrivileges"
+ << BSON_ARRAY(BSON(
+ "resource" << BSON("db"
+ << "test"
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find"
+ << "fakeAction"
+ << "insert")))),
+ BSONObj()));
User* myUser;
ASSERT_OK(authzManager->acquireUser(&txn, UserName("myUser", "test"), &myUser));
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index c2474ac5199..7a620253cc6 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -38,8 +38,8 @@
#include "mongo/base/status.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authz_session_external_state.h"
#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authz_session_external_state.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/security_key.h"
#include "mongo/db/auth/user_management_commands_parser.h"
@@ -338,7 +338,8 @@ Status AuthorizationSession::checkAuthorizedToGrantPrivilege(const Privilege& pr
ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch() << "database");
+ << resource.databaseToMatch()
+ << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::grantRole)) {
@@ -358,7 +359,8 @@ Status AuthorizationSession::checkAuthorizedToRevokePrivilege(const Privilege& p
ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch() << "database");
+ << resource.databaseToMatch()
+ << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::revokeRole)) {
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 8dfc448910f..9098c5a8e1f 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -31,10 +31,10 @@
* Unit tests of the AuthorizationSession type.
*/
#include "mongo/base/status.h"
-#include "mongo/db/auth/authz_session_external_state_mock.h"
-#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/authz_session_external_state_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
@@ -144,8 +144,10 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -171,8 +173,10 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "admin"
<< "db"
<< "admin"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -216,8 +220,10 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -247,8 +253,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rw"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -263,8 +271,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradmin"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdmin"
<< "db"
@@ -276,8 +286,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rwany"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -293,8 +305,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradminany"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdminAnyDatabase"
<< "db"
@@ -387,8 +401,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -413,8 +429,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -452,8 +470,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -479,8 +499,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 601c14decff..bd24c6c5b19 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -87,7 +87,8 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* txn,
}
return Status(ErrorCodes::NoMatchingDocument,
mongoutils::str::stream() << "No document in " << collectionName.ns()
- << " matches " << query);
+ << " matches "
+ << query);
}
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 5d76027fc22..82bd5c29440 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -53,7 +53,8 @@ Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
<< status.reason();
} else {
error() << "Could not generate role graph from admin.system.roles; "
- "only system roles available: " << status;
+ "only system roles available: "
+ << status;
}
}
@@ -81,8 +82,11 @@ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationCo
return Status(ErrorCodes::TypeMismatch,
mongoutils::str::stream()
<< "Could not determine schema version of authorization data. "
- "Bad (non-numeric) type " << typeName(versionElement.type())
- << " (" << versionElement.type() << ") for "
+ "Bad (non-numeric) type "
+ << typeName(versionElement.type())
+ << " ("
+ << versionElement.type()
+ << ") for "
<< AuthorizationManager::schemaVersionFieldName
<< " field in version document");
}
@@ -123,7 +127,8 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
std::string(mongoutils::str::stream()
<< "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: " << errmsg)));
+ << ". Reason: "
+ << errmsg)));
}
}
}
@@ -222,7 +227,8 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
Status status = findOne(txn,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
@@ -324,7 +330,8 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
Status status = roleGraph->addRoleFromDocument(doc);
if (!status.isOK()) {
warning() << "Skipping invalid admin.system.roles document while calculating privileges"
- " for user-defined roles: " << status << "; document " << doc;
+ " for user-defined roles: "
+ << status << "; document " << doc;
}
}
@@ -352,7 +359,8 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* tx
RoleGraphState newState;
if (status == ErrorCodes::GraphContainsCycle) {
error() << "Inconsistent role graph during authorization manager initialization. Only "
- "direct privileges available. " << status.reason();
+ "direct privileges available. "
+ << status.reason();
newState = roleGraphStateHasCycle;
status = Status::OK();
} else if (status.isOK()) {
@@ -400,8 +408,8 @@ public:
if (_isO2Set)
oplogEntryBuilder << "o2" << _o2;
error() << "Unsupported modification to roles collection in oplog; "
- "restart this process to reenable user-defined roles; " << status.reason()
- << "; Oplog entry: " << oplogEntryBuilder.done();
+ "restart this process to reenable user-defined roles; "
+ << status.reason() << "; Oplog entry: " << oplogEntryBuilder.done();
} else if (!status.isOK()) {
warning() << "Skipping bad update to roles collection in oplog. " << status
<< " Oplog entry: " << _op;
@@ -410,8 +418,8 @@ public:
if (status == ErrorCodes::GraphContainsCycle) {
_externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
error() << "Inconsistent role graph during authorization manager initialization. "
- "Only direct privileges available. " << status.reason()
- << " after applying oplog entry " << _op;
+ "Only direct privileges available. "
+ << status.reason() << " after applying oplog entry " << _op;
} else {
fassert(17183, status);
_externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 6bab48f91e9..6c2fe3f9398 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -77,7 +77,8 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
std::string(mongoutils::str::stream()
<< "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: " << errmsg)));
+ << ". Reason: "
+ << errmsg)));
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h
index d6b457e0de9..0b8fa3e0b3c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.h
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include <vector>
#include "mongo/base/disallow_copying.h"
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 48800c500c4..4bdb2648688 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -69,8 +69,8 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
// that runs this command
BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", getParameterCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", getParameterCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -92,11 +92,14 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
BSON("usersInfo" << BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB())) << "showPrivileges" << true
- << "showCredentials" << true);
+ << userName.getDB()))
+ << "showPrivileges"
+ << true
+ << "showCredentials"
+ << true);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", usersInfoCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", usersInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -110,7 +113,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
if (foundUsers.size() > 1) {
return Status(ErrorCodes::UserDataInconsistent,
str::stream() << "Found multiple users on the \"" << userName.getDB()
- << "\" database with name \"" << userName.getUser() << "\"");
+ << "\" database with name \""
+ << userName.getUser()
+ << "\"");
}
*result = foundUsers[0].Obj().getOwned();
return Status::OK();
@@ -121,13 +126,15 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
bool showPrivileges,
BSONObj* result) {
BSONObj rolesInfoCmd =
- BSON("rolesInfo" << BSON_ARRAY(BSON(
- AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
- << roleName.getDB())) << "showPrivileges" << showPrivileges);
+ BSON("rolesInfo" << BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))
+ << "showPrivileges"
+ << showPrivileges);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", rolesInfoCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", rolesInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -141,7 +148,9 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
if (foundRoles.size() > 1) {
return Status(ErrorCodes::RoleDataInconsistent,
str::stream() << "Found multiple roles on the \"" << roleName.getDB()
- << "\" database with name \"" << roleName.getRole() << "\"");
+ << "\" database with name \""
+ << roleName.getRole()
+ << "\"");
}
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
@@ -152,8 +161,9 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
bool showPrivileges,
bool showBuiltinRoles,
std::vector<BSONObj>* result) {
- BSONObj rolesInfoCmd = BSON("rolesInfo" << 1 << "showPrivileges" << showPrivileges
- << "showBuiltinRoles" << showBuiltinRoles);
+ BSONObj rolesInfoCmd =
+ BSON("rolesInfo" << 1 << "showPrivileges" << showPrivileges << "showBuiltinRoles"
+ << showBuiltinRoles);
BSONObjBuilder builder;
const bool ok =
grid.catalogManager(txn)->runUserManagementReadCommand(txn, dbname, rolesInfoCmd, &builder);
@@ -170,8 +180,8 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
BSONObj usersInfoCmd = BSON("usersInfo" << 1);
BSONObjBuilder userBuilder;
- bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", usersInfoCmd, &userBuilder);
+ bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", usersInfoCmd, &userBuilder);
if (!ok) {
// If we were unable to complete the query,
// it's best to assume that there _are_ privilege documents. This might happen
@@ -188,8 +198,8 @@ bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext*
BSONObj rolesInfoCmd = BSON("rolesInfo" << 1);
BSONObjBuilder roleBuilder;
- ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", rolesInfoCmd, &roleBuilder);
+ ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", rolesInfoCmd, &roleBuilder);
if (!ok) {
return true;
}
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
index a85ab1c5ac2..16fb107f2f3 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
@@ -69,7 +69,8 @@ void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(Operation
if (_allowLocalhost) {
ONCE {
log() << "note: no users configured in admin.system.users, allowing localhost "
- "access" << std::endl;
+ "access"
+ << std::endl;
}
}
}
diff --git a/src/mongo/db/auth/native_sasl_authentication_session.cpp b/src/mongo/db/auth/native_sasl_authentication_session.cpp
index 9566ba37487..9e21ffe8d9b 100644
--- a/src/mongo/db/auth/native_sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/native_sasl_authentication_session.cpp
@@ -37,7 +37,6 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
@@ -46,6 +45,7 @@
#include "mongo/db/auth/sasl_options.h"
#include "mongo/db/auth/sasl_plain_server_conversation.h"
#include "mongo/db/auth/sasl_scramsha1_server_conversation.h"
+#include "mongo/db/commands.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 1192e911386..74bace49c7e 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -51,23 +51,28 @@ TEST(PrivilegeParserTest, IsValidTest) {
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have cluster as well as db or collection
- parsedPrivilege.parseBSON(
- BSON("resource" << BSON("cluster" << true << "db"
- << ""
- << "collection"
- << "") << "actions" << BSON_ARRAY("find")),
- &errmsg);
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true << "db"
+ << ""
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
+ &errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have db without collection
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have collection without db
parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
@@ -75,7 +80,9 @@ TEST(PrivilegeParserTest, IsValidTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -83,7 +90,9 @@ TEST(PrivilegeParserTest, IsValidTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -105,7 +114,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -130,7 +141,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -156,7 +169,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -181,7 +196,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -232,13 +249,14 @@ TEST(PrivilegeParserTest, ParseInvalidActionsTest) {
std::vector<std::string> unrecognizedActions;
actionsVector.push_back("find");
- parsedPrivilege.parseBSON(
- BSON("resource" << BSON("db"
- << ""
- << "collection"
- << "") << "actions" << BSON_ARRAY("find"
- << "fakeAction")),
- &errmsg);
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << ""
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find"
+ << "fakeAction")),
+ &errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
parsedPrivilege, &privilege, &unrecognizedActions));
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index a0861b98236..15e8fc87646 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -119,8 +119,8 @@ Status RoleGraph::deleteRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot delete built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot delete built-in role: "
+ << role.getFullName(),
0);
}
@@ -183,8 +183,8 @@ Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role)
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant roles to built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot grant roles to built-in role: "
+ << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -212,8 +212,8 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove roles from built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot remove roles from built-in role: "
+ << role.getFullName(),
0);
}
if (!roleExists(role)) {
@@ -252,8 +252,8 @@ Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
}
if (isBuiltinRole(victim)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove roles from built-in role: " << victim.getFullName(),
+ mongoutils::str::stream() << "Cannot remove roles from built-in role: "
+ << victim.getFullName(),
0);
}
@@ -281,8 +281,8 @@ Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& priv
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
+ << role.getFullName(),
0);
}
@@ -308,8 +308,8 @@ Status RoleGraph::addPrivilegesToRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
+ << role.getFullName(),
0);
}
@@ -330,8 +330,8 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove privileges from built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot remove privileges from built-in role: "
+ << role.getFullName());
}
PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
@@ -343,13 +343,14 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
if (!curActions.isSupersetOf(privilegeToRemove.getActions())) {
// Didn't possess all the actions being removed.
- return Status(ErrorCodes::PrivilegeNotFound,
- mongoutils::str::stream()
- << "Role: " << role.getFullName()
- << " does not contain a privilege on "
- << privilegeToRemove.getResourcePattern().toString()
- << " with actions: " << privilegeToRemove.getActions().toString(),
- 0);
+ return Status(
+ ErrorCodes::PrivilegeNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not contain a privilege on "
+ << privilegeToRemove.getResourcePattern().toString()
+ << " with actions: "
+ << privilegeToRemove.getActions().toString(),
+ 0);
}
curPrivilege.removeActions(privilegeToRemove.getActions());
@@ -389,8 +390,8 @@ Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove privileges from built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot remove privileges from built-in role: "
+ << role.getFullName());
}
_directPrivilegesForRole[role].clear();
return Status::OK();
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index f9dd3b10efe..21be828753c 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -87,7 +87,9 @@ Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& role
return Status(ErrorCodes::FailedToParse,
mongoutils::str::stream()
<< "Role document _id fields must be encoded as the string "
- "dbname.rolename. Found " << idField << " for "
+ "dbname.rolename. Found "
+ << idField
+ << " for "
<< roleName.getFullName());
}
return Status::OK();
diff --git a/src/mongo/db/auth/sasl_authentication_session.cpp b/src/mongo/db/auth/sasl_authentication_session.cpp
index c74bba6fadb..c64e4be8100 100644
--- a/src/mongo/db/auth/sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session.cpp
@@ -36,12 +36,12 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authz_session_external_state_mock.h"
+#include "mongo/db/commands.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/auth/sasl_options.cpp b/src/mongo/db/auth/sasl_options.cpp
index 69bfb504e83..fdb64f044e2 100644
--- a/src/mongo/db/auth/sasl_options.cpp
+++ b/src/mongo/db/auth/sasl_options.cpp
@@ -60,27 +60,31 @@ SASLGlobalParams::SASLGlobalParams() {
Status addSASLOptions(moe::OptionSection* options) {
moe::OptionSection saslOptions("SASL Options");
- saslOptions.addOptionChaining("security.authenticationMechanisms",
- "",
- moe::StringVector,
- "List of supported authentication mechanisms. "
- "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
+ saslOptions
+ .addOptionChaining("security.authenticationMechanisms",
+ "",
+ moe::StringVector,
+ "List of supported authentication mechanisms. "
+ "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining(
- "security.sasl.hostName", "", moe::String, "Fully qualified server domain name")
+ saslOptions
+ .addOptionChaining(
+ "security.sasl.hostName", "", moe::String, "Fully qualified server domain name")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining("security.sasl.serviceName",
- "",
- moe::String,
- "Registered name of the service using SASL")
+ saslOptions
+ .addOptionChaining("security.sasl.serviceName",
+ "",
+ moe::String,
+ "Registered name of the service using SASL")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining("security.sasl.saslauthdSocketPath",
- "",
- moe::String,
- "Path to Unix domain socket file for saslauthd")
+ saslOptions
+ .addOptionChaining("security.sasl.saslauthdSocketPath",
+ "",
+ moe::String,
+ "Path to Unix domain socket file for saslauthd")
.setSources(moe::SourceYAMLConfig);
Status ret = options->addSection(saslOptions);
@@ -178,11 +182,11 @@ public:
virtual Status validate(const int& newValue) {
if (newValue < minimumScramIterationCount) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Invalid value for SCRAM iteration count: " << newValue
- << " is less than the minimum SCRAM iteration count, "
- << minimumScramIterationCount);
+ return Status(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Invalid value for SCRAM iteration count: " << newValue
+ << " is less than the minimum SCRAM iteration count, "
+ << minimumScramIterationCount);
}
return Status::OK();
diff --git a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
index 9fd8496b7bc..ed812ddb27f 100644
--- a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
@@ -61,9 +61,9 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::step(StringData inputData,
_step++;
if (_step > 3 || _step <= 0) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream()
- << "Invalid SCRAM-SHA-1 authentication step: " << _step);
+ return StatusWith<bool>(
+ ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream() << "Invalid SCRAM-SHA-1 authentication step: " << _step);
}
if (_step == 1) {
return _firstStep(input, outputData);
@@ -109,8 +109,8 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>
*/
if (!str::startsWith(input[1], "a=") || input[1].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 authzid: " << input[1]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 authzid: "
+ << input[1]);
}
authzId = input[1].substr(2);
input.erase(input.begin() + 1);
@@ -121,26 +121,29 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for first SCRAM-SHA-1 client message, got "
- << input.size() << " expected 4");
+ << input.size()
+ << " expected 4");
} else if (input[0] != "n") {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client message prefix: " << input[0]);
+ << "Incorrect SCRAM-SHA-1 client message prefix: "
+ << input[0]);
} else if (!str::startsWith(input[1], "n=") || input[1].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 user name: " << input[1]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 user name: "
+ << input[1]);
} else if (!str::startsWith(input[2], "r=") || input[2].size() < 6) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client nonce: " << input[2]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 client nonce: "
+ << input[2]);
}
_user = input[1].substr(2);
if (!authzId.empty() && _user != authzId) {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream() << "SCRAM-SHA-1 user name " << _user
- << " does not match authzid " << authzId);
+ << " does not match authzid "
+ << authzId);
}
decodeSCRAMUsername(_user);
@@ -237,19 +240,20 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for second SCRAM-SHA-1 client message, got "
- << input.size() << " expected 3");
+ << input.size()
+ << " expected 3");
} else if (!str::startsWith(input[0], "c=") || input[0].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
} else if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
} else if (!str::startsWith(input[2], "p=") || input[2].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 ClientProof: " << input[2]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 ClientProof: "
+ << input[2]);
}
// add client-final-message-without-proof to authMessage
@@ -262,7 +266,9 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Unmatched SCRAM-SHA-1 nonce received from client in second step, expected "
- << _nonce << " but received " << nonce);
+ << _nonce
+ << " but received "
+ << nonce);
}
std::string clientProof = input[2].substr(2);
diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp
index fd31a13a6f3..2538259bcae 100644
--- a/src/mongo/db/auth/security_file.cpp
+++ b/src/mongo/db/auth/security_file.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/auth/security_key.h"
-#include <sys/stat.h>
#include <string>
+#include <sys/stat.h>
#include "mongo/base/status_with.h"
#include "mongo/util/mongoutils/str.h"
@@ -92,9 +92,9 @@ StatusWith<std::string> readSecurityFile(const std::string& filename) {
if ((buf < 'A' || buf > 'Z') && (buf < 'a' || buf > 'z') && (buf < '0' || buf > '9') &&
buf != '+' && buf != '/' && buf != '=') {
fclose(file);
- return StatusWith<std::string>(ErrorCodes::UnsupportedFormat,
- str::stream() << "invalid char in key file " << filename
- << ": " << buf);
+ return StatusWith<std::string>(
+ ErrorCodes::UnsupportedFormat,
+ str::stream() << "invalid char in key file " << filename << ": " << buf);
}
str += buf;
diff --git a/src/mongo/db/auth/security_key.cpp b/src/mongo/db/auth/security_key.cpp
index a8e5611e1c0..97e7076c447 100644
--- a/src/mongo/db/auth/security_key.cpp
+++ b/src/mongo/db/auth/security_key.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/auth/security_key.h"
-#include <sys/stat.h>
#include <string>
+#include <sys/stat.h>
#include <vector>
#include "mongo/base/status_with.h"
@@ -89,11 +89,14 @@ bool setUpSecurityKey(const string& filename) {
if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile ||
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
setInternalUserAuthParams(
- BSON(saslCommandMechanismFieldName
- << "SCRAM-SHA-1" << saslCommandUserDBFieldName
- << internalSecurity.user->getName().getDB() << saslCommandUserFieldName
- << internalSecurity.user->getName().getUser() << saslCommandPasswordFieldName
- << credentials.password << saslCommandDigestPasswordFieldName << false));
+ BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
+ << internalSecurity.user->getName().getDB()
+ << saslCommandUserFieldName
+ << internalSecurity.user->getName().getUser()
+ << saslCommandPasswordFieldName
+ << credentials.password
+ << saslCommandDigestPasswordFieldName
+ << false));
}
return true;
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index ef64311cbe6..5f58d956c5c 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -130,7 +130,8 @@ void UserCacheInvalidator::initialize(OperationContext* txn) {
"running an outdated version of mongod on the config servers";
} else {
warning() << "An error occurred while fetching initial user cache generation from "
- "config servers: " << currentGeneration.getStatus();
+ "config servers: "
+ << currentGeneration.getStatus();
}
_previousCacheGeneration = OID();
}
@@ -162,7 +163,8 @@ void UserCacheInvalidator::run() {
if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
warning() << "_getUserCacheGeneration command not found on config server(s), "
"this most likely means you are running an outdated version of mongod "
- "on the config servers" << std::endl;
+ "on the config servers"
+ << std::endl;
} else {
warning() << "An error occurred while fetching current user cache generation "
"to check if user cache needs invalidation: "
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index c3a1e0a490f..273eaff86f5 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -74,7 +74,8 @@ TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
<< "spencer"
<< "pwd"
<< "passwordHash"
- << "readOnly" << true);
+ << "readOnly"
+ << true);
BSONObj readWriteAdmin = BSON("user"
<< "admin"
<< "pwd"
@@ -83,7 +84,8 @@ TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
<< "admin"
<< "pwd"
<< "passwordHash"
- << "readOnly" << true);
+ << "readOnly"
+ << true);
ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(), readOnly, "test"));
RoleNameIterator roles = user->getRoles();
@@ -124,15 +126,15 @@ TEST_F(V1UserDocumentParsing, VerifyRolesFieldMustBeAnArray) {
}
TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
- ASSERT_OK(
- v1parser.initializeUserRolesFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read"
- << "frim")),
- "test"));
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read"
+ << "frim")),
+ "test"));
RoleNameIterator roles = user->getRoles();
RoleName role = roles.next();
if (role == RoleName("read", "test")) {
@@ -145,26 +147,28 @@ TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
}
TEST_F(V1UserDocumentParsing, VerifyOtherDBRolesMustBeAnObjectOfArraysOfStrings) {
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read")
- << "otherDBRoles" << BSON_ARRAY("read")),
- "admin"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read")
+ << "otherDBRoles"
+ << BSON_ARRAY("read")),
+ "admin"));
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read")
- << "otherDBRoles" << BSON("test2"
- << "read")),
- "admin"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read")
+ << "otherDBRoles"
+ << BSON("test2"
+ << "read")),
+ "admin"));
}
TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormally) {
@@ -175,7 +179,8 @@ TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormall
<< "spencer"
<< "pwd"
<< ""
- << "roles" << BSONArrayBuilder().arr()
+ << "roles"
+ << BSONArrayBuilder().arr()
<< "otherDBRoles"
<< BSON("test2" << BSON_ARRAY("read"))),
"test"));
@@ -184,15 +189,17 @@ TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormall
TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
// Grant userAdmin on test via admin.
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSONArrayBuilder().arr() << "otherDBRoles"
- << BSON("test" << BSON_ARRAY("userAdmin"))),
- "admin"));
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSONArrayBuilder().arr()
+ << "otherDBRoles"
+ << BSON("test" << BSON_ARRAY(
+ "userAdmin"))),
+ "admin"));
RoleNameIterator roles = adminUser->getRoles();
ASSERT_EQUALS(RoleName("userAdmin", "test"), roles.next());
ASSERT_FALSE(roles.more());
@@ -200,15 +207,16 @@ TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
TEST_F(V1UserDocumentParsing, MixedV0V1UserDocumentsAreInvalid) {
// Try to mix fields from V0 and V1 user documents and make sure it fails.
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "pwd"
- << "passwordHash"
- << "readOnly" << false << "roles"
- << BSON_ARRAY("read")),
- "test"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << "passwordHash"
+ << "readOnly"
+ << false
+ << "roles"
+ << BSON_ARRAY("read")),
+ "test"));
ASSERT_FALSE(user->getRoles().more());
}
@@ -235,20 +243,25 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "pwd"
<< "a"
- << "roles" << BSON_ARRAY("read"))));
+ << "roles"
+ << BSON_ARRAY("read"))));
// Need name field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need source field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need credentials field
@@ -256,23 +269,27 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "roles" << emptyArray)));
+ << "roles"
+ << emptyArray)));
// Need roles field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a"))));
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a"))));
// Empty roles arrays are OK
ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need credentials of {external: true} if user's db is $external
@@ -280,16 +297,20 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "$external"
- << "credentials" << BSON("external" << true)
- << "roles" << emptyArray)));
+ << "credentials"
+ << BSON("external" << true)
+ << "roles"
+ << emptyArray)));
// Roles must be objects
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY("read"))));
// Role needs name
@@ -297,8 +318,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("db"
<< "dbA")))));
@@ -307,8 +330,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA")))));
@@ -318,8 +343,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -330,8 +357,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -346,10 +375,13 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "extraData"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "extraData"
<< BSON("foo"
- << "bar") << "roles"
+ << "bar")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -424,44 +456,45 @@ TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
user.get()));
// V1-style roles arrays no longer work
- ASSERT_NOT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY("read")),
- user.get()));
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY("read")),
+ user.get()));
// Roles must have "db" field
- ASSERT_NOT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY(BSONObj())),
- user.get()));
-
ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
<< "spencer"
- << "roles" << BSON_ARRAY(BSON(
- "role"
- << "roleA"))),
+ << "roles"
+ << BSON_ARRAY(BSONObj())),
user.get()));
ASSERT_NOT_OK(
v2parser.initializeUserRolesFromUserDocument(BSON("user"
<< "spencer"
- << "roles" << BSON_ARRAY(BSON("user"
- << "roleA"
- << "db"
- << "dbA"))),
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"))),
user.get()));
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY(BSON("user"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
+
// Valid role names are extracted successfully
- ASSERT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY(BSON("role"
- << "roleA"
- << "db"
- << "dbA"))),
- user.get()));
+ ASSERT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
RoleNameIterator roles = user->getRoles();
ASSERT_EQUALS(RoleName("roleA", "dbA"), roles.next());
ASSERT_FALSE(roles.more());
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 6707a70b8ca..db6a2f96f9a 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -684,8 +684,11 @@ Status parseAuthSchemaUpgradeCommand(const BSONObj& cmdObj,
if (steps < minUpgradeSteps || steps > maxUpgradeSteps) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream() << "Legal values for \"maxSteps\" are at least "
- << minUpgradeSteps << " and no more than "
- << maxUpgradeSteps << "; found " << steps);
+ << minUpgradeSteps
+ << " and no more than "
+ << maxUpgradeSteps
+ << "; found "
+ << steps);
}
parsedArgs->maxSteps = static_cast<int>(steps);
diff --git a/src/mongo/db/auth/user_management_commands_parser.h b/src/mongo/db/auth/user_management_commands_parser.h
index ff65eca69e4..94dc3b7b2ae 100644
--- a/src/mongo/db/auth/user_management_commands_parser.h
+++ b/src/mongo/db/auth/user_management_commands_parser.h
@@ -31,9 +31,9 @@
#include <string>
#include <vector>
+#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/base/disallow_copying.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user.h"
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index e3869b1bd6b..18af0509631 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -133,7 +133,8 @@ void BackgroundOperation::assertNoBgOpInProgForDb(StringData db) {
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
mongoutils::str::stream()
<< "cannot perform operation: a background operation is currently running for "
- "database " << db,
+ "database "
+ << db,
!inProgForDb(db));
}
@@ -141,7 +142,8 @@ void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
uassert(ErrorCodes::BackgroundOperationInProgressForNamespace,
mongoutils::str::stream()
<< "cannot perform operation: a background operation is currently running for "
- "collection " << ns,
+ "collection "
+ << ns,
!inProgForNs(ns));
}
diff --git a/src/mongo/db/background.h b/src/mongo/db/background.h
index b510c165a96..f8cad335a34 100644
--- a/src/mongo/db/background.h
+++ b/src/mongo/db/background.h
@@ -33,9 +33,9 @@
#pragma once
+#include <iosfwd>
#include <map>
#include <set>
-#include <iosfwd>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 2883cb26439..c18a7b0975f 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index_builder.h"
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 0f2304a6f49..b73b732b723 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -105,9 +105,9 @@ Status collMod(OperationContext* txn,
const IndexDescriptor* idx =
coll->getIndexCatalog()->findIndexByKeyPattern(txn, keyPattern);
if (idx == NULL) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "cannot find index " << keyPattern
- << " for ns " << nss.ns());
+ errorStatus = Status(
+ ErrorCodes::InvalidOptions,
+ str::stream() << "cannot find index " << keyPattern << " for ns " << nss.ns());
continue;
}
BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 883bf5deb17..02bb9bbaa1d 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -306,7 +306,9 @@ StatusWithMatchExpression Collection::parseValidator(const BSONObj& validator) c
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collections in"
- << " the " << ns().db() << " database"};
+ << " the "
+ << ns().db()
+ << " database"};
}
{
@@ -358,7 +360,8 @@ Status Collection::insertDocuments(OperationContext* txn,
if (hasIdIndex && (*it)["_id"].eoo()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Collection::insertDocument got "
- "document without _id for ns:" << _ns.ns());
+ "document without _id for ns:"
+ << _ns.ns());
}
auto status = checkValidation(txn, *it);
@@ -600,7 +603,9 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
return {ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
- << oldSize << " != " << newDoc.objsize()};
+ << oldSize
+ << " != "
+ << newDoc.objsize()};
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between oldDoc and
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index b9ea06a2aef..b8fbffe2f69 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -34,13 +34,13 @@
#include "mongo/base/counter.h"
#include "mongo/base/owned_pointer_map.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/curop.h"
-#include "mongo/db/catalog/database.h"
-#include "mongo/db/catalog/document_validation.h"
-#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/operation_context.h"
#include "mongo/util/log.h"
@@ -150,7 +150,9 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
- << ": " << keyStatus.reason() << " For more info see"
+ << ": "
+ << keyStatus.reason()
+ << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 4e773f1b027..25a03c16270 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -131,9 +131,9 @@ TEST(CollectionOptions, InvalidStorageEngineField) {
TEST(CollectionOptions, ParseEngineField) {
CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson(
- "{unknownField: 1, "
- "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
+ ASSERT_OK(opts.parse(
+ fromjson("{unknownField: 1, "
+ "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
checkRoundTrip(opts);
// Unrecognized field should not be present in BSON representation.
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index b93c0bb4332..14a7042a13c 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/random.h"
#include "mongo/util/exit.h"
#include "mongo/util/startup_test.h"
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 080d37e0eb8..26eb99c0a9c 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -500,7 +500,9 @@ Collection* Database::createCollection(OperationContext* txn,
// This check only applies for actual collections, not indexes or other types of ns.
uassert(17381,
str::stream() << "fully qualified namespace " << ns << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
+ << "(max is "
+ << NamespaceString::MaxNsCollectionLen
+ << " bytes)",
ns.size() <= NamespaceString::MaxNsCollectionLen);
}
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 7ba373fd362..cf27d3da9a7 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -35,12 +35,12 @@
#include "mongo/db/audit.h"
#include "mongo/db/auth/auth_index_d.h"
#include "mongo/db/background.h"
-#include "mongo/db/client.h"
-#include "mongo/db/clientcursor.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/service_context.h"
+#include "mongo/db/client.h"
+#include "mongo/db/clientcursor.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 85430ff6f10..7d6387e4ae8 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -107,8 +107,8 @@ Status wrappedRun(OperationContext* txn,
collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
if (desc == NULL) {
return Status(ErrorCodes::IndexNotFound,
- str::stream()
- << "can't find index with key: " << f.embeddedObject().toString());
+ str::stream() << "can't find index with key: "
+ << f.embeddedObject().toString());
}
if (desc->isIdIndex()) {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 7887759486b..58efa054129 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/audit.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/catalog/index_key_validate.h"
@@ -47,7 +47,6 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/curop.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_legacy.h"
@@ -56,12 +55,13 @@
#include "mongo/db/keypattern.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/query/collation/collation_serializer.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -186,7 +186,8 @@ Status IndexCatalog::checkUnfinished() const {
return Status(ErrorCodes::InternalError,
str::stream() << "IndexCatalog has left over indexes that must be cleared"
- << " ns: " << _collection->ns().ns());
+ << " ns: "
+ << _collection->ns().ns());
}
bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
@@ -199,7 +200,8 @@ bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& k
// supports an index plugin unsupported by this version.
uassert(17197,
str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index " << keyPattern,
+ << "in index "
+ << keyPattern,
known);
return false;
}
@@ -483,7 +485,8 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
if (v != 0 && v != 1) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "this version of mongod cannot build new indexes "
- << "of version number " << v);
+ << "of version number "
+ << v);
}
}
@@ -508,7 +511,9 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "the \"ns\" field of the index spec '"
<< specNamespace.valueStringData()
- << "' does not match the collection name '" << nss.ns() << "'");
+ << "' does not match the collection name '"
+ << nss.ns()
+ << "'");
// logical name of the index
const BSONElement nameElem = spec["name"];
@@ -526,7 +531,8 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
if (indexNamespace.length() > NamespaceString::MaxNsLen)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "namespace name generated from index name \""
- << indexNamespace << "\" is too long (127 byte max)");
+ << indexNamespace
+ << "\" is too long (127 byte max)");
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
@@ -650,9 +656,12 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
if (!desc->keyPattern().equal(key))
return Status(ErrorCodes::IndexKeySpecsConflict,
str::stream() << "Trying to create an index "
- << "with same name " << name
- << " with different key spec " << key
- << " vs existing spec " << desc->keyPattern());
+ << "with same name "
+ << name
+ << " with different key spec "
+ << key
+ << " vs existing spec "
+ << desc->keyPattern());
IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
@@ -702,7 +711,8 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
<< "found existing text index \""
- << textIndexes[0]->indexName() << "\"");
+ << textIndexes[0]->indexName()
+ << "\"");
}
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 72c0bc6a28a..4007879a247 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -44,9 +44,9 @@
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/fail_point_service.h"
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index 1cfe8c03d69..46c8bd394a2 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -87,7 +87,8 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueFails) {
ASSERT_EQ(ErrorCodes::CannotCreateIndex,
validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b" << true)));
+ << "b"
+ << true)));
}
TEST(IndexKeyValidateTest, KeyElementNullValueFails) {
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 1fd09e158cc..2f3517e8159 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -78,7 +78,8 @@ Status renameCollection(OperationContext* txn,
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while renaming collection " << source.ns()
- << " to " << target.ns());
+ << " to "
+ << target.ns());
}
Database* const sourceDB = dbHolder().get(txn, source.db());
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index 2aef3af715c..713e9a176f0 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -63,7 +63,8 @@ public:
<< th(a("", "Connections to the database, both internal and external.", "Client"))
<< th(a("http://dochub.mongodb.org/core/viewingandterminatingcurrentoperation",
"",
- "OpId")) << "<th>Locking</th>"
+ "OpId"))
+ << "<th>Locking</th>"
<< "<th>Waiting</th>"
<< "<th>SecsRunning</th>"
<< "<th>Op</th>"
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 635381749f0..e4ae99ee19d 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -125,7 +125,8 @@ struct Cloner::Fun {
unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(txn->lockState()));
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns(),
+ << " to "
+ << to_collection.ns(),
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
@@ -279,7 +280,10 @@ void Cloner::copy(OperationContext* txn,
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns() << " with filter " << query.toString(),
+ << " to "
+ << to_collection.ns()
+ << " with filter "
+ << query.toString(),
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
}
@@ -308,7 +312,9 @@ void Cloner::copyIndexes(OperationContext* txn,
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to " << to_collection.ns() << " (Cloner)",
+ << " to "
+ << to_collection.ns()
+ << " (Cloner)",
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 7b091e4f29b..7f1629fafde 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -30,8 +30,8 @@
#pragma once
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/client/dbclientinterface.h"
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 1f59a884410..abfd3c221f5 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -78,9 +78,9 @@ Command::~Command() = default;
string Command::parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const {
BSONElement first = cmdObj.firstElement();
uassert(17005,
- mongoutils::str::stream()
- << "Main argument to " << first.fieldNameStringData()
- << " must be a fully qualified namespace string. Found: " << first.toString(false),
+ mongoutils::str::stream() << "Main argument to " << first.fieldNameStringData()
+ << " must be a fully qualified namespace string. Found: "
+ << first.toString(false),
first.type() == mongo::String &&
NamespaceString::validCollectionComponent(first.valuestr()));
return first.String();
@@ -108,7 +108,9 @@ NamespaceString Command::parseNsCollectionRequired(const string& dbname,
#if defined(CLC)
DEV if (mongoutils::str::startsWith(coll, dbname + '.')) {
log() << "DEBUG parseNs Command's collection name looks like it includes the db name\n"
- << dbname << '\n' << coll << '\n' << cmdObj.toString();
+ << dbname << '\n'
+ << coll << '\n'
+ << cmdObj.toString();
dassert(false);
}
#endif
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 12df18040a9..21947850f6c 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -43,16 +43,16 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/dbhash.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -147,8 +147,8 @@ private:
// op - operation type
BSONElement opElement = obj.getField("op");
if (opElement.eoo()) {
- errmsg = str::stream()
- << "op does not contain required \"op\" field: " << e.fieldName();
+ errmsg = str::stream() << "op does not contain required \"op\" field: "
+ << e.fieldName();
return false;
}
if (opElement.type() != mongo::String) {
@@ -166,8 +166,8 @@ private:
// Only operations of type 'n' are allowed to have an empty namespace.
BSONElement nsElement = obj.getField("ns");
if (nsElement.eoo()) {
- errmsg = str::stream()
- << "op does not contain required \"ns\" field: " << e.fieldName();
+ errmsg = str::stream() << "op does not contain required \"ns\" field: "
+ << e.fieldName();
return false;
}
if (nsElement.type() != mongo::String) {
@@ -179,8 +179,8 @@ private:
return false;
}
if (*opType != 'n' && nsElement.String().empty()) {
- errmsg = str::stream()
- << "\"ns\" field value cannot be empty when op type is not 'n': " << e.fieldName();
+ errmsg = str::stream() << "\"ns\" field value cannot be empty when op type is not 'n': "
+ << e.fieldName();
return false;
}
return true;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 9ce858feb3d..ddb251bc4c9 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -30,8 +30,8 @@
#include "mongo/base/status.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 87673050a2a..b8b475b6926 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -35,8 +35,8 @@
#include "mongo/bson/util/builder.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
@@ -48,8 +48,8 @@
#include "mongo/db/instance.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/isself.h"
#include "mongo/db/ops/insert.h"
+#include "mongo/db/repl/isself.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 59866778ab1..5484395cd6e 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -36,12 +36,12 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/find.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
namespace mongo {
@@ -100,11 +100,12 @@ public:
NamespaceString nss(dbname, to);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NotMaster,
- str::stream()
- << "Not primary while cloning collection " << from
- << " to " << to << " (as capped)"));
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning collection " << from << " to "
+ << to
+ << " (as capped)"));
}
Database* const db = autoDb.getDb();
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 7619c87a20d..92d721b70e1 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -31,8 +31,8 @@
#include "mongo/base/status.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
@@ -165,10 +165,11 @@ public:
uassert(13008, "must call copydbgetnonce first", authConn.get());
BSONObj ret;
{
- if (!authConn->runCommand(cloneOptions.fromDB,
- BSON("authenticate" << 1 << "user" << username << "nonce"
- << nonce << "key" << key),
- ret)) {
+ if (!authConn->runCommand(
+ cloneOptions.fromDB,
+ BSON("authenticate" << 1 << "user" << username << "nonce" << nonce << "key"
+ << key),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
@@ -179,11 +180,11 @@ public:
cmdObj.hasField(saslCommandPayloadFieldName)) {
uassert(25487, "must call copydbsaslstart first", authConn.get());
BSONObj ret;
- if (!authConn->runCommand(cloneOptions.fromDB,
- BSON("saslContinue"
- << 1 << cmdObj[saslCommandConversationIdFieldName]
- << cmdObj[saslCommandPayloadFieldName]),
- ret)) {
+ if (!authConn->runCommand(
+ cloneOptions.fromDB,
+ BSON("saslContinue" << 1 << cmdObj[saslCommandConversationIdFieldName]
+ << cmdObj[saslCommandPayloadFieldName]),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
diff --git a/src/mongo/db/commands/copydb_common.cpp b/src/mongo/db/commands/copydb_common.cpp
index 5f033aede73..2a690ae0a17 100644
--- a/src/mongo/db/commands/copydb_common.cpp
+++ b/src/mongo/db/commands/copydb_common.cpp
@@ -69,8 +69,8 @@ Status checkAuthForCopydbCommand(ClientBasic* client,
actions.addAction(ActionType::bypassDocumentValidation);
}
- if (!AuthorizationSession::get(client)
- ->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(todb), actions)) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(todb), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 609c1167184..8426b14d072 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -37,8 +37,8 @@
#include "mongo/client/dbclientinterface.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/client.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index f18c15bce74..c7e42889772 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -41,13 +41,13 @@
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/scopeguard.h"
@@ -130,8 +130,8 @@ public:
BSONElement e = it.next();
StringData fieldName(e.fieldName(), e.fieldNameSize());
if (std::find(keys.begin(), keys.end(), fieldName) != keys.end()) {
- errmsg = str::stream()
- << "duplicate keys detected in index spec: " << indexKey;
+ errmsg = str::stream() << "duplicate keys detected in index spec: "
+ << indexKey;
return false;
}
keys.push_back(fieldName);
@@ -286,7 +286,8 @@ public:
Status(ErrorCodes::NotMaster,
str::stream()
<< "Not primary while creating background indexes in "
- << ns.ns() << ": cleaning up index build failure due to "
+ << ns.ns()
+ << ": cleaning up index build failure due to "
<< e.toString()));
}
} catch (...) {
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 83aed3c3d0e..a280f059f23 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -127,8 +127,10 @@ public:
return Status(ErrorCodes::TypeMismatch,
str::stream() << "\"" << kQueryField
<< "\" had the wrong type. Expected "
- << typeName(BSONType::Object) << " or "
- << typeName(BSONType::jstNULL) << ", found "
+ << typeName(BSONType::Object)
+ << " or "
+ << typeName(BSONType::jstNULL)
+ << ", found "
<< typeName(queryElt.type()));
}
}
@@ -142,7 +144,8 @@ public:
return Status(ErrorCodes::TypeMismatch,
str::stream() << "\"" << kCollationField
<< "\" had the wrong type. Expected "
- << typeName(BSONType::Object) << ", found "
+ << typeName(BSONType::Object)
+ << ", found "
<< typeName(collationElt.type()));
}
collation = collationElt.embeddedObject();
@@ -198,8 +201,8 @@ public:
{
stdx::lock_guard<Client>(*txn->getClient());
- CurOp::get(txn)
- ->setPlanSummary_inlock(Explain::getPlanSummary(executor.getValue().get()));
+ CurOp::get(txn)->setPlanSummary_inlock(
+ Explain::getPlanSummary(executor.getValue().get()));
}
string key = cmdObj[kKeyField].valuestrsafe();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 9829ce29e3c..2b334a51e87 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -49,11 +49,11 @@
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 0691b451f3e..dd1e04f311b 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/commands/find_and_modify.h"
-#include <memory>
#include <boost/optional.hpp>
+#include <memory>
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
@@ -127,7 +127,8 @@ StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
const std::string opstr = isRemove ? "delete" : "update";
return {ErrorCodes::OperationFailed,
str::stream() << "executor returned " << PlanExecutor::statestr(state)
- << " while executing " << opstr};
+ << " while executing "
+ << opstr};
}
invariant(state == PlanExecutor::IS_EOF);
@@ -376,8 +377,8 @@ public:
// Attach the namespace and database profiling level to the current op.
{
stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)
- ->enter_inlock(nsString.ns().c_str(), autoDb.getDb()->getProfilingLevel());
+ CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
auto css = CollectionShardingState::get(txn, nsString);
@@ -444,8 +445,8 @@ public:
// Attach the namespace and database profiling level to the current op.
{
stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)
- ->enter_inlock(nsString.ns().c_str(), autoDb.getDb()->getProfilingLevel());
+ CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
auto css = CollectionShardingState::get(txn, nsString);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 547ebc4bfa1..5573e8fc819 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -41,16 +41,16 @@
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/matcher/extensions_callback_real.h"
-#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/query/find.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/get_executor.h"
+#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index 05724e33705..941ac0d2503 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -281,9 +281,9 @@ public:
if (it->first == it->second->getName())
commands.push_back(it->second);
}
- std::sort(commands.begin(),
- commands.end(),
- [](Command* lhs, Command* rhs) { return (lhs->getName()) < (rhs->getName()); });
+ std::sort(commands.begin(), commands.end(), [](Command* lhs, Command* rhs) {
+ return (lhs->getName()) < (rhs->getName());
+ });
BSONObjBuilder b(result.subobjStart("commands"));
for (const auto& c : commands) {
@@ -414,7 +414,8 @@ public:
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false) << " of type "
+ << val.toString(false)
+ << " of type "
<< typeName(val.type())));
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 1ffc0342330..c2d4bd0da2f 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -49,8 +49,8 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/getmore_request.h"
#include "mongo/db/query/plan_summary_stats.h"
-#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
@@ -137,8 +137,8 @@ public:
}
const GetMoreRequest& request = parseStatus.getValue();
- return AuthorizationSession::get(client)
- ->checkAuthForGetMore(request.nss, request.cursorid, request.term.is_initialized());
+ return AuthorizationSession::get(client)->checkAuthForGetMore(
+ request.nss, request.cursorid, request.term.is_initialized());
}
bool run(OperationContext* txn,
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 63308fc27ed..ba9ff7d2ead 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -94,8 +94,8 @@ private:
const std::string& dbname,
const BSONObj& cmdObj) {
std::string ns = parseNs(dbname, cmdObj);
- if (!AuthorizationSession::get(client)
- ->isAuthorizedForActionsOnNamespace(NamespaceString(ns), ActionType::find)) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
+ NamespaceString(ns), ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
return Status::OK();
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ab8c0634747..ecf27a3f1d8 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/base/init.h"
#include "mongo/base/owned_pointer_vector.h"
@@ -105,8 +105,8 @@ static Status getQuerySettingsAndPlanCache(OperationContext* txn,
// available to the client.
//
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
new ListFilters();
new ClearFilters();
new SetFilter();
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index 7ba1157bef7..80fed645c5e 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -29,8 +29,8 @@
#pragma once
#include "mongo/db/commands.h"
-#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/query_settings.h"
namespace mongo {
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 7f00a1521f4..765871ac134 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -308,13 +308,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson(
- "{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1}]}")));
vector<BSONObj> filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
@@ -328,13 +328,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson(
- "{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1, b: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1, b: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index f6d144de358..724c0f5f1f0 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -107,8 +107,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to list indexes on collection: " << ns.coll());
+ str::stream() << "Not authorized to list indexes on collection: "
+ << ns.coll());
}
CmdListIndexes() : Command("listIndexes") {}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 62d92ca1120..a55d60d0fb0 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -416,7 +416,9 @@ void State::prepTempCollection() {
if (!status.isOK()) {
uasserted(17305,
str::stream() << "createIndex failed for mr incLong ns: "
- << _config.incLong << " err: " << status.code());
+ << _config.incLong
+ << " err: "
+ << status.code());
}
wuow.commit();
}
@@ -511,7 +513,9 @@ void State::appendResults(BSONObjBuilder& final) {
BSONObj idKey = BSON("_id" << 1);
if (!_db.runCommand("admin",
BSON("splitVector" << _config.outputOptions.finalNamespace
- << "keyPattern" << idKey << "maxChunkSizeBytes"
+ << "keyPattern"
+ << idKey
+ << "maxChunkSizeBytes"
<< _config.splitInfo),
res)) {
uasserted(15921, str::stream() << "splitVector failed: " << res);
@@ -622,7 +626,8 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
if (!_db.runCommand("admin",
BSON("renameCollection" << _config.tempNamespace << "to"
<< _config.outputOptions.finalNamespace
- << "stayTemp" << _config.shardedFirstPass),
+ << "stayTemp"
+ << _config.shardedFirstPass),
info)) {
uasserted(10076, str::stream() << "rename failed: " << info);
}
@@ -749,8 +754,10 @@ void State::_insertToInc(BSONObj& o) {
if (o.objsize() > BSONObjMaxUserSize) {
uasserted(ErrorCodes::BadValue,
str::stream() << "object to insert too large for incremental collection"
- << ". size in bytes: " << o.objsize()
- << ", max size: " << BSONObjMaxUserSize);
+ << ". size in bytes: "
+ << o.objsize()
+ << ", max size: "
+ << BSONObjMaxUserSize);
}
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index d58ca5326f2..8987af00474 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -57,7 +57,11 @@ void _compareOutputOptionField(const std::string& dbname,
if (actual == expected)
return;
FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName << ": Expected: " << expected << ". Actual: " << actual);
+ << fieldName
+ << ": Expected: "
+ << expected
+ << ". Actual: "
+ << actual);
}
/**
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index bebf6d4d13c..784781405b4 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -32,13 +32,13 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
namespace mongo {
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 425d5eb8791..2dfe8fdf614 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -28,6 +28,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/base/checked_cast.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
@@ -38,7 +39,6 @@
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
-#include "mongo/base/checked_cast.h"
namespace mongo {
@@ -103,7 +103,8 @@ public:
Status(ErrorCodes::BadValue,
str::stream()
<< "numCursors has to be between 1 and 10000"
- << " was: " << numCursors));
+ << " was: "
+ << numCursors));
auto iterators = collection->getManyCursors(txn);
if (iterators.size() < numCursors) {
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index c2a5bf23d91..acc8128536d 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -255,8 +255,8 @@ public:
int newValue;
if (!newValueElement.coerce(&newValue) || newValue < 0)
return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Invalid value for logLevel: " << newValueElement);
+ mongoutils::str::stream() << "Invalid value for logLevel: "
+ << newValueElement);
LogSeverity newSeverity =
(newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
@@ -483,7 +483,8 @@ public:
if (str != "disabled" && str != "allowSSL" && str != "preferSSL" && str != "requireSSL") {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Invalid value for sslMode via setParameter command: " << str);
+ << "Invalid value for sslMode via setParameter command: "
+ << str);
}
int oldMode = sslGlobalParams.sslMode.load();
@@ -495,7 +496,9 @@ public:
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Illegal state transition for sslMode, attempt to change from "
- << sslModeStr() << " to " << str);
+ << sslModeStr()
+ << " to "
+ << str);
}
return Status::OK();
}
@@ -566,7 +569,9 @@ public:
#ifdef MONGO_CONFIG_SSL
setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << "MONGODB-X509"
+ << saslCommandUserDBFieldName
+ << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName));
#endif
@@ -576,7 +581,9 @@ public:
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeStr() << " to " << str);
+ << clusterAuthModeStr()
+ << " to "
+ << str);
}
return Status::OK();
}
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index c3154f96391..a2962ae712b 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/pipeline_proxy.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/pipeline/accumulator.h"
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/document_source.h"
@@ -52,6 +51,7 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_summary_stats.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/stdx/memory.h"
@@ -127,7 +127,9 @@ static bool handleCursorCommand(OperationContext* txn,
msgasserted(
17391,
str::stream() << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection " << ns << " doesn't exist");
+ << "create cursor since collection "
+ << ns
+ << " doesn't exist");
}
if (cursor) {
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 440889255ba..0d4d11793f2 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/base/init.h"
#include "mongo/base/status.h"
@@ -96,8 +96,8 @@ static Status getPlanCache(OperationContext* txn,
// available to the client.
//
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
// PlanCacheCommand constructors refer to static ActionType instances.
// Registering commands in a mongo static initializer ensures that
// the ActionType construction will be completed first.
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 21cbc937e73..bb8e2f0ad7e 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -202,8 +202,9 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: 1}"))
.getStatus());
// Bad query (invalid sort order)
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(
- &txn, nss.ns(), fromjson("{query: {}, sort: {a: 0}}")).getStatus());
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ .getStatus());
// Valid parameters
auto statusWithCQ =
@@ -307,10 +308,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
- << "projection" << cqA->getParsed().getProj());
- BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
- << "projection" << cqB->getParsed().getProj());
+ BSONObj shapeA =
+ BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort() << "projection"
+ << cqA->getParsed().getProj());
+ BSONObj shapeB =
+ BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort() << "projection"
+ << cqB->getParsed().getProj());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 210e279607f..746aeb49db1 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -31,21 +31,21 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
-#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/rename_collection.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index 9570b212f64..bbe7e9930a7 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -32,13 +32,13 @@
#include "mongo/platform/basic.h"
+#include "mongo/config.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/client_basic.h"
-#include "mongo/config.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/commands/server_status_internal.h"
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index 1ebe57280d7..862cf1960e9 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -30,11 +30,11 @@
#pragma once
-#include <string>
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/stats/counters.h"
#include "mongo/platform/atomic_word.h"
+#include <string>
namespace mongo {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 5bcfe71e365..c32cc208090 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -41,11 +41,11 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -263,11 +263,11 @@ public:
for (int i = 0; i < n + 1; ++i) {
PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
if (PlanExecutor::ADVANCED != state) {
- return appendCommandStatus(result,
- {ErrorCodes::IllegalOperation,
- str::stream()
- << "invalid n, collection contains fewer than "
- << n << " documents"});
+ return appendCommandStatus(
+ result,
+ {ErrorCodes::IllegalOperation,
+ str::stream() << "invalid n, collection contains fewer than " << n
+ << " documents"});
}
}
}
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index e4b788dc711..6f236de90da 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -33,10 +33,10 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/client.h"
+#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/stats/top.h"
-#include "mongo/db/commands.h"
namespace {
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 696339496eb..2bc8c1d1b51 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -95,7 +95,8 @@ BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -106,7 +107,8 @@ BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -172,9 +174,9 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream()
- << "Roles on the \'" << role.getDB()
+ return Status(
+ ErrorCodes::InvalidRoleModification,
+ str::stream() << "Roles on the \'" << role.getDB()
<< "\' database cannot be granted roles from other databases");
}
@@ -195,11 +197,11 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
if (sequenceContains(indirectRoles, role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Granting " << roleToAdd.getFullName() << " to "
- << role.getFullName()
- << " would introduce a cycle in the role graph.");
+ return Status(
+ ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream() << "Granting " << roleToAdd.getFullName() << " to "
+ << role.getFullName()
+ << " would introduce a cycle in the role graph.");
}
}
return Status::OK();
@@ -421,13 +423,14 @@ Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
* Updates the given role object with the given update modifier.
*/
Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -481,13 +484,14 @@ Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
Status updatePrivilegeDocument(OperationContext* txn,
const UserName& user,
const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << user.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -554,7 +558,8 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
str::stream()
<< "User and role management commands require auth data to have "
<< "at least schema version "
- << AuthorizationManager::schemaVersion26Final << " but found "
+ << AuthorizationManager::schemaVersion26Final
+ << " but found "
<< foundSchemaVersion);
}
return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
@@ -577,7 +582,8 @@ Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
return Status(ErrorCodes::AuthSchemaIncompatible,
str::stream() << "The usersInfo and rolesInfo commands require auth data to "
<< "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade << " but found "
+ << AuthorizationManager::schemaVersion26Upgrade
+ << " but found "
<< foundSchemaVersion);
}
return Status::OK();
@@ -1901,7 +1907,8 @@ public:
ss << "Drops a single role. Before deleting the role completely it must remove it "
"from any users or roles that reference it. If any errors occur in the middle "
"of that process it's possible to be left in a state where the role has been "
- "removed from some user/roles but otherwise still exists." << endl;
+ "removed from some user/roles but otherwise still exists."
+ << endl;
}
virtual Status checkAuthForCommand(ClientBasic* client,
@@ -1967,11 +1974,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users: " << status.reason()));
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream() << "Failed to remove role "
+ << roleName.getFullName()
+ << " from all users: "
+ << status.reason()));
}
// Remove this role from all other roles
@@ -2019,7 +2027,8 @@ public:
Status(status.code(),
str::stream() << "Removed role " << roleName.getFullName()
<< " from all users and roles but failed to actually delete"
- " the role itself: " << status.reason()));
+ " the role itself: "
+ << status.reason()));
}
dassert(nMatched == 0 || nMatched == 1);
@@ -2052,7 +2061,8 @@ public:
"it must remove them from any users or other roles that reference them. If any "
"errors occur in the middle of that process it's possible to be left in a state "
"where the roles have been removed from some user/roles but otherwise still "
- "exist." << endl;
+ "exist."
+ << endl;
}
virtual Status checkAuthForCommand(ClientBasic* client,
@@ -2100,9 +2110,10 @@ public:
: status.code();
return appendCommandStatus(result,
Status(code,
- str::stream()
- << "Failed to remove roles from \"" << dbname
- << "\" db from all users: " << status.reason()));
+ str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all users: "
+ << status.reason()));
}
// Remove these roles from all other roles
@@ -2125,9 +2136,10 @@ public:
: status.code();
return appendCommandStatus(result,
Status(code,
- str::stream()
- << "Failed to remove roles from \"" << dbname
- << "\" db from all roles: " << status.reason()));
+ str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all roles: "
+ << status.reason()));
}
audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
@@ -2143,7 +2155,8 @@ public:
str::stream() << "Removed roles from \"" << dbname
<< "\" db "
" from all users and roles but failed to actually delete"
- " those roles themselves: " << status.reason()));
+ " those roles themselves: "
+ << status.reason()));
}
result.append("n", nMatched);
@@ -2535,7 +2548,9 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ << 1
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << 1);
Status status =
queryAuthzDocument(txn,
@@ -2613,7 +2628,9 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+ << 1
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << 1);
Status status =
queryAuthzDocument(txn,
@@ -2741,7 +2758,8 @@ void updateUserCredentials(OperationContext* txn,
mongoutils::str::stream()
<< "While preparing to upgrade user doc from "
"2.6/3.0 user data schema to the 3.0+ SCRAM only schema, found a user doc "
- "with missing or incorrectly formatted credentials: " << userDoc.toString(),
+ "with missing or incorrectly formatted credentials: "
+ << userDoc.toString(),
credentialsElement.type() == Object);
BSONObj credentialsObj = credentialsElement.Obj();
@@ -2758,7 +2776,8 @@ void updateUserCredentials(OperationContext* txn,
mongoutils::str::stream()
<< "While preparing to upgrade user doc from "
"2.6/3.0 user data schema to the 3.0+ SCRAM only schema, found a user doc "
- "missing MONGODB-CR credentials :" << userDoc.toString(),
+ "missing MONGODB-CR credentials :"
+ << userDoc.toString(),
!mongoCRElement.eoo());
std::string hashedPassword = mongoCRElement.String();
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 03a6a129fdc..292c21a8255 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -66,8 +66,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to grant role: " << roles[i].getFullName());
+ str::stream() << "Not authorized to grant role: "
+ << roles[i].getFullName());
}
}
@@ -91,8 +91,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to revoke role: " << roles[i].getFullName());
+ str::stream() << "Not authorized to revoke role: "
+ << roles[i].getFullName());
}
}
return Status::OK();
@@ -123,8 +123,8 @@ Status checkAuthForCreateUserCommand(ClientBasic* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to create users on db: " << args.userName.getDB());
+ str::stream() << "Not authorized to create users on db: "
+ << args.userName.getDB());
}
return checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -205,8 +205,8 @@ Status checkAuthForCreateRoleCommand(ClientBasic* client,
if (!authzSession->isAuthorizedToCreateRole(args)) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to create roles on db: " << args.roleName.getDB());
+ str::stream() << "Not authorized to create roles on db: "
+ << args.roleName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -441,7 +441,8 @@ Status checkAuthForRolesInfoCommand(ClientBasic* client,
ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB() << " database");
+ << args.roleNames[i].getDB()
+ << " database");
}
}
}
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands/write_commands_common.cpp
index 82f3ab4db67..aa208a1d3c7 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands_common.cpp
@@ -33,9 +33,9 @@
#include <string>
#include <vector>
-#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.h b/src/mongo/db/commands/write_commands/write_commands_common.h
index cf47bdc02b1..53ba02aad05 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.h
+++ b/src/mongo/db/commands/write_commands/write_commands_common.h
@@ -28,9 +28,9 @@
#pragma once
+#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/auth/authorization_session.h"
#include "mongo/s/write_ops/batched_command_request.h"
/**
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 2477b58f02d..672266146e4 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -32,9 +32,9 @@
#include <string>
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 48fcb073bbe..d794c7c2031 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -29,8 +29,8 @@
#pragma once
#include <cstdint>
-#include <string>
#include <limits>
+#include <string>
#include "mongo/base/string_data.h"
#include "mongo/config.h"
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index ff77d6021c3..d16c9f4e11d 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -34,8 +34,8 @@
#include <vector>
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/compiler.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/synchronization.h"
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 3c2a30cd932..e3906d29de9 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -134,7 +134,8 @@ BSONObj upconvertGetMoreEntry(const NamespaceString& nss, CursorId cursorId, int
boost::none, // awaitDataTimeout
boost::none, // term
boost::none // lastKnownCommittedOpTime
- ).toBSON();
+ )
+ .toBSON();
}
} // namespace
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 09a761d639a..eee95967b96 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -35,9 +35,9 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/util/net/message.h"
#include "mongo/util/progress_meter.h"
#include "mongo/util/time_support.h"
-#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/curop_metrics.cpp b/src/mongo/db/curop_metrics.cpp
index 558659ae554..be2ed8e00de 100644
--- a/src/mongo/db/curop_metrics.cpp
+++ b/src/mongo/db/curop_metrics.cpp
@@ -29,9 +29,9 @@
#include "mongo/platform/basic.h"
#include "mongo/base/counter.h"
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/curop.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/commands/server_status_metric.h"
namespace mongo {
namespace {
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 547b946492d..b67e6b2b11d 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -441,7 +441,8 @@ static void repairDatabasesAndCheckVersion(OperationContext* txn) {
status = {ErrorCodes::MustUpgrade, status.reason()};
}
severe() << "Unable to start mongod due to an incompatibility with the data files and"
- " this version of mongod: " << status;
+ " this version of mongod: "
+ << status;
severe() << "Please consult our documentation when trying to downgrade to a previous"
" major release";
quickExit(EXIT_NEED_UPGRADE);
@@ -835,9 +836,8 @@ int main(int argc, char* argv[], char** envp) {
}
#endif
-MONGO_INITIALIZER_GENERAL(ForkServer,
- ("EndStartupOptionHandling"),
- ("default"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default"))
+(InitializerContext* context) {
mongo::forkServerOrDie();
return Status::OK();
}
@@ -949,9 +949,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CreateReplicationManager,
}
#ifdef MONGO_CONFIG_SSL
-MONGO_INITIALIZER_GENERAL(setSSLManagerType,
- MONGO_NO_PREREQUISITES,
- ("SSLManager"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(setSSLManagerType, MONGO_NO_PREREQUISITES, ("SSLManager"))
+(InitializerContext* context) {
isSSLServer = true;
return Status::OK();
}
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 5a9da9cafb6..f5ca27cd748 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -30,9 +30,9 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
-#include "mongo/db/catalog/database_holder.h"
#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index e25c3d09ae0..f1bc7208092 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/db_raii.h"
-#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/repl/replication_coordinator_global.h"
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 4b4a6089364..a7def1cb6c5 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -63,9 +63,9 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index_builder.h"
-#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_builder.h"
#include "mongo/db/instance.h"
#include "mongo/db/introspect.h"
#include "mongo/db/jsobj.h"
@@ -90,13 +90,13 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/write_concern.h"
-#include "mongo/rpc/request_interface.h"
-#include "mongo/rpc/reply_builder_interface.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/rpc/metadata/sharding_metadata.h"
#include "mongo/rpc/protocol.h"
+#include "mongo/rpc/reply_builder_interface.h"
+#include "mongo/rpc/request_interface.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -1379,7 +1379,8 @@ void Command::execCommand(OperationContext* txn,
34422,
str::stream()
<< "Received a command with sharding chunk version information but this "
- "node is not sharding aware: " << request.getCommandArgs().jsonString(),
+ "node is not sharding aware: "
+ << request.getCommandArgs().jsonString(),
!oss.hasShardVersion() ||
ChunkVersion::isIgnoredVersion(oss.getShardVersion(commandNS)));
}
@@ -1405,8 +1406,8 @@ void Command::execCommand(OperationContext* txn,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, NamespaceString(sce.getns()), sce.getVersionReceived());
}
BSONObjBuilder metadataBob;
@@ -1509,8 +1510,8 @@ bool Command::run(OperationContext* txn,
// Wait until a snapshot is available.
while (status == ErrorCodes::ReadConcernMajorityNotAvailableYet) {
- LOG(debugLevel)
- << "Snapshot not available for readConcern: " << readConcernArgs;
+ LOG(debugLevel) << "Snapshot not available for readConcern: "
+ << readConcernArgs;
replCoord->waitUntilSnapshotCommitted(txn, SnapshotName::min());
status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index e0a5b6ffcd3..dc88905ae63 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/btree_access_method.h"
#include "mongo/db/json.h"
#include "mongo/db/keypattern.h"
@@ -57,16 +56,17 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/data_protector.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/write_concern.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 53d0839711b..6386a943552 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -28,8 +28,8 @@
#pragma once
-#include <memory>
#include <boost/filesystem/path.hpp>
+#include <memory>
#include "mongo/db/db.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index c924c0a6ddb..f6088f8be2c 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -42,14 +42,14 @@
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/db/auth/user_name.h"
#include "mongo/db/auth/user.h"
+#include "mongo/db/auth/user_name.h"
#include "mongo/db/background.h"
#include "mongo/db/commands.h"
#include "mongo/db/db.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/snapshots.h"
#include "mongo/rpc/command_reply.h"
#include "mongo/rpc/command_reply_builder.h"
@@ -451,7 +451,8 @@ void DbWebServer::doRequest(const char* rq,
"These read-only context-less commands can be executed from the web "
"interface. Results are json format, unless ?text=1 is appended in which "
"case the result is output as text for easier human viewing",
- "Commands") << ": ";
+ "Commands")
+ << ": ";
auto m = Command::commandsByBestName();
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index a9bcb8466f2..033949827bc 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/exec/and_common-inl.h"
#include "mongo/db/exec/scoped_timer.h"
-#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index b984f390590..456062dc41e 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -30,8 +30,8 @@
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
#include "mongo/platform/unordered_map.h"
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3425a2562b5..5ec0329f8b1 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -209,7 +209,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
str::stream() << "error processing query: " << _canonicalQuery->toString()
- << " planner returned error: " << status.reason());
+ << " planner returned error: "
+ << status.reason());
}
OwnedPointerVector<QuerySolution> solutions(rawSolutions);
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index e065b955012..99cfaf42e6c 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -31,9 +31,9 @@
#include <list>
#include <memory>
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index a4179218638..b130d128023 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -30,14 +30,14 @@
#include "mongo/db/exec/collection_scan.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
@@ -74,7 +74,8 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
ErrorCodes::CappedPositionLost,
str::stream()
<< "CollectionScan died due to position in capped collection being deleted. "
- << "Last seen record id: " << _lastSeenId);
+ << "Last seen record id: "
+ << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::DEAD;
}
@@ -107,7 +108,8 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
Status status(ErrorCodes::CappedPositionLost,
str::stream() << "CollectionScan died due to failure to restore "
<< "tailable cursor position. "
- << "Last seen record id: " << _lastSeenId);
+ << "Last seen record id: "
+ << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::DEAD;
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index e1e88a3333a..0f3e09314e7 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -38,10 +38,10 @@
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/write_stage_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index b40e9be02eb..c53573706f0 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -35,8 +35,8 @@
#include "third_party/s2/s2regionintersection.h"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/fetch.h"
+#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/working_set_computed_data.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/geo/geoparser.h"
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 8040a57f183..5d58f616248 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -35,8 +35,8 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/s2_common.h"
-#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression.h"
+#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/index_bounds.h"
#include "third_party/s2/s2cellunion.h"
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 07883c5a7a9..cadfc02940c 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index b5e2e690b80..bc62d78d82c 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/record_id.h"
namespace mongo {
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index c57e0a5f02f..7f9185435c9 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -38,8 +38,8 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/explain.h"
@@ -47,8 +47,8 @@
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 3aff4852c19..1363847caf5 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -29,14 +29,14 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/plan_yield_policy.h"
+#include "mongo/db/query/query_solution.h"
#include "mongo/db/record_id.h"
namespace mongo {
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index e2eba0c6b53..d9ccdc4b92a 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -30,8 +30,8 @@
#include <queue>
-#include "mongo/base/string_data.h"
#include "mongo/base/status_with.h"
+#include "mongo/base/string_data.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/plan_stats.h"
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 791a1ebfa62..bf30e2c8eb5 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -28,8 +28,8 @@
#pragma once
-#include <boost/optional/optional.hpp>
#include <boost/intrusive_ptr.hpp>
+#include <boost/optional/optional.hpp>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index 4c9689f544e..2399e35fd73 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -32,13 +32,13 @@
#include "mongo/db/exec/projection_exec.h"
-#include <memory>
-#include "mongo/db/json.h"
#include "mongo/db/exec/working_set_computed_data.h"
+#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/unittest/unittest.h"
+#include <memory>
using namespace mongo;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index ee7d4ab4d15..1b604e1085e 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -33,11 +33,11 @@
#include <algorithm>
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/working_set_computed_data.h"
#include "mongo/db/index/btree_key_generator.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/lite_parsed_query.h"
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 650eb9174d1..e2efeb87337 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -28,8 +28,8 @@
#pragma once
-#include <vector>
#include <set>
+#include <vector>
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/sort_key_generator.h"
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index b63b56b0c4d..c4d168f02e3 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -31,8 +31,8 @@
#include <memory>
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index f5316a7a94c..837300e3ace 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -212,12 +212,12 @@ public:
<< PlanExecutor::statestr(state)
<< ", stats: " << Explain::getWinningPlanStats(exec.get());
- return appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: " << WorkingSetCommon::toStatusString(obj)));
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during "
+ << "StageDebug command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
return true;
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 8be5da178dc..5d1fee15703 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_access.h"
+#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/stage_builder.h"
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index fdc53a11a25..ddbcc1d9a46 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/index_scan.h"
-#include "mongo/db/exec/text_or.h"
-#include "mongo/db/exec/text_match.h"
#include "mongo/db/exec/scoped_timer.h"
+#include "mongo/db/exec/text_match.h"
+#include "mongo/db/exec/text_or.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/fts/fts_index_format.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index 0d94a8a5bfb..f9aabab4f2a 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/jsobj.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/stdx/memory.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index b06197460ac..7fca4f3906b 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -37,11 +37,11 @@
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/write_stage_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/update_lifecycle.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -146,7 +146,8 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep)
// not an okay, $ prefixed field name.
return Status(ErrorCodes::DollarPrefixedFieldName,
str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
- << "' in '" << mb::getFullName(elem)
+ << "' in '"
+ << mb::getFullName(elem)
<< "' is not valid for storage.");
}
@@ -198,7 +199,8 @@ Status storageValid(const mb::ConstElement& elem, const bool deep) {
// Field name cannot have a "." in it.
return Status(ErrorCodes::DottedFieldName,
str::stream() << "The dotted field '" << elem.getFieldName() << "' in '"
- << mb::getFullName(elem) << "' is not valid for storage.");
+ << mb::getFullName(elem)
+ << "' is not valid for storage.");
}
}
@@ -340,9 +342,12 @@ inline Status validate(const BSONObj& original,
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document with "
- << newIdElem.toString() << ", the '" << current.dottedField()
+ << newIdElem.toString()
+ << ", the '"
+ << current.dottedField()
<< "' (required and immutable) field was "
- "found to have been removed --" << original);
+ "found to have been removed --"
+ << original);
}
} else {
// Find the potentially affected field in the original document.
@@ -358,7 +363,8 @@ inline Status validate(const BSONObj& original,
mongoutils::str::stream()
<< "After applying the update to the document {"
<< (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
- << " , ...}, the (immutable) field '" << current.dottedField()
+ << " , ...}, the (immutable) field '"
+ << current.dottedField()
<< "' was found to be an array or array descendant.");
}
currElem = currElem.parent();
@@ -369,8 +375,10 @@ inline Status validate(const BSONObj& original,
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document {"
- << oldElem.toString() << " , ...}, the (immutable) field '"
- << current.dottedField() << "' was found to have been altered to "
+ << oldElem.toString()
+ << " , ...}, the (immutable) field '"
+ << current.dottedField()
+ << "' was found to have been altered to "
<< newElem.toString());
}
}
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index 57e3bdc9a5a..8dab9943f3d 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -28,8 +28,8 @@
#pragma once
-#include <vector>
#include <unordered_set>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 1981985a831..a4052435d63 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -31,11 +31,11 @@
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/service_context.h"
namespace mongo {
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index c0c98facdf7..378e4b08907 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
#include "mongo/db/storage/snapshot.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 2e9027362c8..428699484b9 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -26,9 +26,9 @@
* it in the license file.
*/
+#include <map>
#include <string>
#include <vector>
-#include <map>
#include "mongo/db/field_parser.h"
#include "mongo/db/jsobj.h"
@@ -78,7 +78,9 @@ protected:
valLong = 1LL;
doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
- << aString(valString) << anOID(valOID) << aLong(valLong));
+ << aString(valString)
+ << anOID(valOID)
+ << aLong(valLong));
}
void tearDown() {}
@@ -315,9 +317,13 @@ TEST(ComplexExtraction, GetObjectMap) {
BSONObjBuilder bob;
bob << mapField() << BSON("a" << BSON("a"
- << "a") << "b" << BSON("b"
- << "b") << "c" << BSON("c"
- << "c"));
+ << "a")
+ << "b"
+ << BSON("b"
+ << "b")
+ << "c"
+ << BSON("c"
+ << "c"));
BSONObj obj = bob.obj();
map<string, BSONObj> parsedMap;
@@ -342,7 +348,9 @@ TEST(ComplexExtraction, GetBadMap) {
BSONObjBuilder bob;
bob << mapField() << BSON("a"
<< "a"
- << "b" << 123 << "c"
+ << "b"
+ << 123
+ << "c"
<< "c");
BSONObj obj = bob.obj();
@@ -421,7 +429,9 @@ TEST(ComplexExtraction, GetBadNestedMap) {
BSONObj nestedMapObj = BSON("a"
<< "a"
- << "b" << 123 << "c"
+ << "b"
+ << 123
+ << "c"
<< "c");
BSONObjBuilder bob;
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index 6950dd93f66..688197a392a 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -67,12 +67,18 @@ TEST(FTDCCompressor, TestBasic) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42),
+ << "key1"
+ << 33
+ << "key2"
+ << 42),
Date_t());
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t());
ASSERT_HAS_SPACE(st);
@@ -179,89 +185,141 @@ TEST(FTDCCompressor, TestSchemaChanges) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
ASSERT_HAS_SPACE(st);
// Rename field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key5"
+ << 45
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Change type
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5"
+ << "key1"
+ << 34
+ << "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47 << "key7" << 34 << "key9"
- << 45 << "key13" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Remove Field
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 34 << "key9" << 45 << "key13" << 47));
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 34 << "key9" << 45 << "key13" << 47));
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_HAS_SPACE(st);
// Start new batch
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 5));
+ << "key7"
+ << 5));
ASSERT_SCHEMA_CHANGED(st);
// Change field to object
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << BSON( // nested object
- "a" << 1)));
+ << "key7"
+ << BSON( // nested object
+ "a" << 1)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from object to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 7));
+ << "key7"
+ << 7));
ASSERT_SCHEMA_CHANGED(st);
// Change field from number to array
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << BSON_ARRAY(13 << 17)));
+ << "key7"
+ << BSON_ARRAY(13 << 17)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from array to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 19));
+ << "key7"
+ << 19));
ASSERT_SCHEMA_CHANGED(st);
@@ -288,15 +346,24 @@ TEST(FTDCCompressor, TestNumbersCompat) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42LL));
+ << "key1"
+ << 33
+ << "key2"
+ << 42LL));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34LL << "key2" << 45.0f));
+ << "key1"
+ << 34LL
+ << "key2"
+ << 45.0f));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<char>(32) << "key2" << 45.0F));
+ << "key1"
+ << static_cast<char>(32)
+ << "key2"
+ << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -320,31 +387,49 @@ TEST(FTDCCompressor, Types) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42LL));
+ << "key1"
+ << 33
+ << "key2"
+ << 42LL));
ASSERT_HAS_SPACE(st);
const char bytes[] = {0x1, 0x2, 0x3};
- BSONObj o = BSON("created" << DATENOW // date_t
- << "null" << BSONNULL // { a : null }
- << "undefined" << BSONUndefined // { a : undefined }
- << "obj" << BSON( // nested object
- "a"
- << "abc"
- << "b" << 123LL) << "foo"
+ BSONObj o = BSON("created" << DATENOW // date_t
+ << "null"
+ << BSONNULL // { a : null }
+ << "undefined"
+ << BSONUndefined // { a : undefined }
+ << "obj"
+ << BSON( // nested object
+ "a"
+ << "abc"
+ << "b"
+ << 123LL)
+ << "foo"
<< BSON_ARRAY("bar"
<< "baz"
- << "qux") // array of strings
- << "foo2" << BSON_ARRAY(5 << 6 << 7) // array of ints
- << "bindata" << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
- << "oid" << OID("010203040506070809101112") // oid
- << "bool" << true // bool
- << "regex" << BSONRegEx("mongodb") // regex
- << "ref" << BSONDBRef("c", OID("010203040506070809101112")) // ref
- << "code" << BSONCode("func f() { return 1; }") // code
- << "codewscope" << BSONCodeWScope("func f() { return 1; }",
- BSON("c" << true)) // codew
- << "minkey" << MINKEY // minkey
- << "maxkey" << MAXKEY // maxkey
+ << "qux") // array of strings
+ << "foo2"
+ << BSON_ARRAY(5 << 6 << 7) // array of ints
+ << "bindata"
+ << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
+ << "oid"
+ << OID("010203040506070809101112") // oid
+ << "bool"
+ << true // bool
+ << "regex"
+ << BSONRegEx("mongodb") // regex
+ << "ref"
+ << BSONDBRef("c", OID("010203040506070809101112")) // ref
+ << "code"
+ << BSONCode("func f() { return 1; }") // code
+ << "codewscope"
+ << BSONCodeWScope("func f() { return 1; }",
+ BSON("c" << true)) // codew
+ << "minkey"
+ << MINKEY // minkey
+ << "maxkey"
+ << MAXKEY // maxkey
);
st = c.addSample(o);
@@ -355,11 +440,17 @@ TEST(FTDCCompressor, Types) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34LL << "key2" << 45.0f));
+ << "key1"
+ << 34LL
+ << "key2"
+ << 45.0f));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<char>(32) << "key2" << 45.0F));
+ << "key1"
+ << static_cast<char>(32)
+ << "key2"
+ << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -371,25 +462,37 @@ TEST(FTDCCompressor, TestFull) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
ASSERT_HAS_SPACE(st);
for (size_t i = 0; i != FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
+ << "key1"
+ << static_cast<long long int>(i * j)
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
}
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_FULL(st);
// Add Value
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
}
}
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index dbefc3fd451..146ad60120c 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -71,8 +71,8 @@ StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
boost::filesystem::create_directories(dir, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\'" << dir.generic_string()
- << "\' could not be created: " << ec.message()};
+ str::stream() << "\'" << dir.generic_string() << "\' could not be created: "
+ << ec.message()};
}
}
@@ -241,7 +241,8 @@ FTDCFileManager::recoverInterimFile() {
log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
"but failed "
"to open it, some "
- "metrics may have been lost. " << s;
+ "metrics may have been lost. "
+ << s;
// Note: We ignore any actual errors as reading from the interim files is a best-effort
return docs;
@@ -258,7 +259,8 @@ FTDCFileManager::recoverInterimFile() {
if (!m.isOK() || !docs.empty()) {
log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
"some "
- "metrics may have been lost. " << m.getStatus();
+ "metrics may have been lost. "
+ << m.getStatus();
}
// Note: We ignore any actual errors as reading from the interim files is a best-effort
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 4d10c659bbc..6c2e5c220a6 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include <boost/filesystem.hpp>
#include <algorithm>
+#include <boost/filesystem.hpp>
#include <iostream>
#include <string>
@@ -69,34 +69,45 @@ TEST(FTDCFileManagerTest, TestFull) {
// Test a large numbers of zeros, and incremental numbers in a full buffer
for (int j = 0; j < 10; j++) {
- ASSERT_OK(
- mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1" << 3230792343LL << "key2" << 235135),
- Date_t()));
+ ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1"
+ << 3230792343LL
+ << "key2"
+ << 235135),
+ Date_t()));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
- ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(
- client,
- BSON("name"
- << "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
- << static_cast<long long int>(i * (645 << j))),
- Date_t()));
+ ASSERT_OK(
+ mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
+ << static_cast<long long int>(i *
+ (645 << j))),
+ Date_t()));
}
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -161,7 +172,9 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 3230792343LL << "key2"
+ << "key1"
+ << 3230792343LL
+ << "key2"
<< 235135),
Date_t()));
@@ -171,7 +184,9 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
client,
BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -179,14 +194,20 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -221,7 +242,9 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 3230792343LL << "key2"
+ << "key1"
+ << 3230792343LL
+ << "key2"
<< 235135),
Date_t()));
@@ -231,7 +254,9 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
client,
BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -239,14 +264,20 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -277,14 +308,23 @@ TEST(FTDCFileManagerTest, TestNormalCrashInterim) {
BSONObj mdoc1 = BSON("name"
<< "some_metadata"
- << "key1" << 34 << "something" << 98);
+ << "key1"
+ << 34
+ << "something"
+ << 98);
BSONObj sdoc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj sdoc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
boost::filesystem::path fileOut;
diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp
index 55af92327d5..54b79151958 100644
--- a/src/mongo/db/ftdc/file_reader.cpp
+++ b/src/mongo/db/ftdc/file_reader.cpp
@@ -194,7 +194,8 @@ StatusWith<BSONObj> FTDCFileReader::readDocument() {
if (readSize != _stream.gcount()) {
return {ErrorCodes::FileStreamFailed,
str::stream() << "Failed to read " << readSize << " bytes from file \'"
- << _file.generic_string() << "\'"};
+ << _file.generic_string()
+ << "\'"};
}
ConstDataRange cdr(_buffer.data(), _buffer.data() + bsonLength);
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 6555a13c115..00124eb2c36 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -56,10 +56,16 @@ TEST(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -101,10 +107,16 @@ TEST(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -180,41 +192,69 @@ TEST(FTDCFileTest, TestSchemaChanges) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
// Rename field
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key5"
+ << 45
+ << "key3"
+ << 47));
// Change type
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5"
+ << "key1"
+ << 34
+ << "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
// RemoveField
c.addSample(BSON("name"
<< "joe"
<< "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
}
// Test a full buffer
@@ -225,22 +265,34 @@ TEST(FTDCFileTest, TestFull) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
+ << "key1"
+ << static_cast<long long int>(i * j)
+ << "key2"
+ << 45));
}
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
}
}
diff --git a/src/mongo/db/ftdc/ftdc_test.cpp b/src/mongo/db/ftdc/ftdc_test.cpp
index b09aa2b6ef5..3a010ab32c5 100644
--- a/src/mongo/db/ftdc/ftdc_test.cpp
+++ b/src/mongo/db/ftdc/ftdc_test.cpp
@@ -109,8 +109,8 @@ void createDirectoryClean(const boost::filesystem::path& dir) {
boost::filesystem::create_directory(dir);
}
-MONGO_INITIALIZER_WITH_PREREQUISITES(FTDCTestInit,
- ("ThreadNameInitializer"))(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(FTDCTestInit, ("ThreadNameInitializer"))
+(InitializerContext* context) {
setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
getGlobalServiceContext()->setFastClockSource(stdx::make_unique<ClockSourceMock>());
diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp
index ea87e6b58e5..d56eb8ca380 100644
--- a/src/mongo/db/ftdc/util.cpp
+++ b/src/mongo/db/ftdc/util.cpp
@@ -152,7 +152,8 @@ StatusWith<bool> extractMetricsFromDocument(const BSONObj& referenceDoc,
!(referenceElement.isNumber() == true &&
currentElement.isNumber() == referenceElement.isNumber())) {
LOG(4) << "full-time diagnostic data capture schema change: field type change for "
- "field '" << referenceElement.fieldNameStringData() << "' from '"
+ "field '"
+ << referenceElement.fieldNameStringData() << "' from '"
<< static_cast<int>(referenceElement.type()) << "' to '"
<< static_cast<int>(currentElement.type()) << "'";
matches = false;
@@ -371,7 +372,9 @@ StatusWith<FTDCType> getBSONDocumentType(const BSONObj& obj) {
static_cast<FTDCType>(value) != FTDCType::kMetadata) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << std::string(kFTDCTypeField)
- << "' is not an expected value, found '" << value << "'"};
+ << "' is not an expected value, found '"
+ << value
+ << "'"};
}
return {static_cast<FTDCType>(value)};
diff --git a/src/mongo/db/ftdc/varint.h b/src/mongo/db/ftdc/varint.h
index 0dd4c73fb1b..beb0313b9ac 100644
--- a/src/mongo/db/ftdc/varint.h
+++ b/src/mongo/db/ftdc/varint.h
@@ -31,8 +31,8 @@
#include <cstddef>
#include <cstdint>
-#include "mongo/base/status.h"
#include "mongo/base/data_type.h"
+#include "mongo/base/status.h"
namespace mongo {
/**
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index 4df642dc66a..0e2d0b8d463 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -47,7 +47,8 @@ extern const double MAX_WEIGHT;
std::ostream& operator<<(std::ostream& os, FTSElementIterator::FTSIteratorFrame& frame) {
BSONObjIterator it = frame._it;
return os << "FTSIteratorFrame["
- " element=" << (*it).toString() << ", _language=" << frame._language->str()
+ " element="
+ << (*it).toString() << ", _language=" << frame._language->str()
<< ", _parentPath=" << frame._parentPath << ", _isArray=" << frame._isArray << "]";
}
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index 30814b54d78..c9698d59c93 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -139,7 +139,8 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
uassert(16732,
mongoutils::str::stream() << "too many unique keys for a single document to"
- << " have a text index, max is " << term_freqs.size()
+ << " have a text index, max is "
+ << term_freqs.size()
<< obj["_id"],
term_freqs.size() <= 400000);
@@ -173,7 +174,9 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
uassert(16733,
mongoutils::str::stream()
<< "trying to index text where term list is too big, max is "
- << MaxKeyBSONSizeMB << "mb " << obj["_id"],
+ << MaxKeyBSONSizeMB
+ << "mb "
+ << obj["_id"],
keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
}
}
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index af353d51f26..03eb7406a79 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -36,9 +36,9 @@
#include "mongo/db/fts/fts_index_format.h"
#include "mongo/db/fts/fts_spec.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -67,12 +67,14 @@ TEST(FTSIndexFormat, Simple1) {
TEST(FTSIndexFormat, ExtraBack1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text"
- << "x" << 1)))));
+ << "x"
+ << 1)))));
BSONObjSet keys;
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x" << 5),
+ << "x"
+ << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -91,7 +93,8 @@ TEST(FTSIndexFormat, ExtraFront1) {
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x" << 5),
+ << "x"
+ << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -152,9 +155,10 @@ void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet
* Terms that are too long are not truncated in version 1.
*/
TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 1))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 1))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -181,9 +185,10 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
* characters of the term to form the index key.
*/
TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 2))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 2))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -215,9 +220,10 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
* characters of the term to form the index key.
*/
TEST(FTSIndexFormat, LongWordTextIndexVersion3) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 3))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 3))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index b01e9de6508..f52002b9be0 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -279,10 +279,11 @@ StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion te
if (it == languageMap->end()) {
// TEXT_INDEX_VERSION_2 and above reject unrecognized language strings.
- Status status = Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "unsupported language: \"" << langName
- << "\" for text index version " << textIndexVersion);
+ Status status =
+ Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "unsupported language: \"" << langName
+ << "\" for text index version "
+ << textIndexVersion);
return StatusWithFTSLanguage(status);
}
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index 062a3255ba1..49da2439529 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -30,11 +30,11 @@
#pragma once
+#include "mongo/base/status_with.h"
#include "mongo/db/fts/fts_basic_phrase_matcher.h"
#include "mongo/db/fts/fts_phrase_matcher.h"
#include "mongo/db/fts/fts_unicode_phrase_matcher.h"
#include "mongo/db/fts/fts_util.h"
-#include "mongo/base/status_with.h"
#include <string>
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index 87e37272850..3049d8d4af8 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
#include "mongo/db/fts/fts_language.h"
#include "mongo/db/fts/fts_spec.h"
+#include "mongo/platform/basic.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index 6a782e730cc..b3107418542 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -30,10 +30,10 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/fts/fts_element_iterator.h"
#include "mongo/db/fts/fts_matcher.h"
#include "mongo/db/fts/fts_phrase_matcher.h"
#include "mongo/db/fts/fts_tokenizer.h"
-#include "mongo/db/fts/fts_element_iterator.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp
index bc879375e88..040333de5c7 100644
--- a/src/mongo/db/fts/fts_query_impl.cpp
+++ b/src/mongo/db/fts/fts_query_impl.cpp
@@ -32,12 +32,12 @@
#include "mongo/db/fts/fts_query_impl.h"
-#include "mongo/db/fts/fts_spec.h"
#include "mongo/db/fts/fts_query_parser.h"
+#include "mongo/db/fts/fts_spec.h"
#include "mongo/db/fts/fts_tokenizer.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stringutils.h"
-#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index 25f0f0fd211..43585e8a982 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -159,9 +159,8 @@ TEST(FTSQueryImpl, Phrase1) {
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
}
@@ -197,9 +196,8 @@ TEST(FTSQueryImpl, HyphenSurroundedByWhitespaceBeforePhraseShouldNotNegateEntire
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
@@ -210,9 +208,8 @@ TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
@@ -223,9 +220,8 @@ TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
- "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
+ fromjson("{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
+ "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
}
TEST(FTSQueryImpl, CaseSensitiveOption) {
@@ -309,9 +305,8 @@ TEST(FTSQueryImpl, Mix1) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
- "['industry'], negatedPhrases: []}"));
+ fromjson("{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
+ "['industry'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, NegPhrase2) {
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 05248d157b7..0c03ed8aa18 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -58,7 +58,7 @@ const std::string moduleDefaultLanguage("english");
bool validateOverride(const string& override) {
// The override field can't be empty, can't be prefixed with a dollar sign, and
// can't contain a dot.
- return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
+ return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
}
}
@@ -90,8 +90,12 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) {
msgasserted(17364,
str::stream() << "attempt to use unsupported textIndexVersion "
<< textIndexVersionElt.numberInt()
- << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
- << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);
+ << "; versions supported: "
+ << TEXT_INDEX_VERSION_3
+ << ", "
+ << TEXT_INDEX_VERSION_2
+ << ", "
+ << TEXT_INDEX_VERSION_1);
}
// Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
@@ -401,7 +405,9 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT << ") but found: " << i->second};
+ << MAX_WORD_WEIGHT
+ << ") but found: "
+ << i->second};
}
// Verify weight refers to a valid field.
diff --git a/src/mongo/db/fts/fts_spec.h b/src/mongo/db/fts/fts_spec.h
index a00e04f7052..8cd293e70cf 100644
--- a/src/mongo/db/fts/fts_spec.h
+++ b/src/mongo/db/fts/fts_spec.h
@@ -31,8 +31,8 @@
#pragma once
#include <map>
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/base/status_with.h"
#include "mongo/db/fts/fts_language.h"
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index 15739d8787d..f660c00f526 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -241,7 +241,9 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT << ") but found: " << i->second};
+ << MAX_WORD_WEIGHT
+ << ") but found: "
+ << i->second};
}
b.append(i->first, i->second);
}
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index 5ecc0109333..3c041cbd363 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -184,7 +184,9 @@ TEST(FTSSpec, ScoreSingleField1) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -202,7 +204,9 @@ TEST(FTSSpec, ScoreMultipleField1) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -243,7 +247,9 @@ TEST(FTSSpec, ScoreRepeatWord) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -268,7 +274,8 @@ TEST(FTSSpec, Extra1) {
TEST(FTSSpec, Extra2) {
BSONObj user = BSON("key" << BSON("data"
<< "text"
- << "x" << 1));
+ << "x"
+ << 1));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
FTSSpec spec(fixed);
ASSERT_EQUALS(0U, spec.numExtraBefore());
@@ -286,7 +293,8 @@ TEST(FTSSpec, Extra3) {
ASSERT_EQUALS(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1),
+ << "_ftsx"
+ << 1),
fixed["key"].Obj());
ASSERT_EQUALS(BSON("data" << 1), fixed["weights"].Obj());
@@ -512,7 +520,9 @@ TEST(FTSSpec, NestedLanguages_Wildcard) {
// Multi-language test_6: test wildcard spec with override
TEST(FTSSpec, NestedLanguages_WildcardOverride) {
BSONObj indexSpec = BSON("key" << BSON("$**"
- << "text") << "weights" << BSON("d.e.f" << 20));
+ << "text")
+ << "weights"
+ << BSON("d.e.f" << 20));
FTSSpec spec(assertGet(FTSSpec::fixSpec(indexSpec)));
TermFrequencyMap tfm;
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index 3ac82b03768..26146654f40 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/db/geo/big_polygon.h"
-#include "mongo/bson/util/builder.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/builder.h"
#include "mongo/unittest/unittest.h"
namespace {
@@ -80,7 +80,8 @@ typedef PointBuilder points;
TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// A 10x10 square centered at [0,0]
S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
@@ -93,7 +94,8 @@ TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,20]
BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
+ << LatLng(-10.0, 10.0)
+ << LatLng(-10.0, 30.0)));
ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
@@ -105,15 +107,18 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// A 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(holePoly));
@@ -121,7 +126,8 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(holePoly));
ASSERT_TRUE(bigPoly24.Intersects(holePoly));
@@ -132,10 +138,12 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -143,21 +151,24 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
// A 16X16 square centered at [0,0] containing the shell
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(shellPoly));
ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
// Try a big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(shellPoly));
ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
// Try a big polygon smaller than the shell.
BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
+ << LatLng(-4.0, -4.0)
+ << LatLng(-4.0, 4.0)));
ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly8.Contains(shellPoly));
ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
@@ -166,7 +177,8 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
TEST(BigSimplePolygon, BasicComplement) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 square centered at [0,0]
@@ -179,7 +191,8 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 10x10 square centered at [0,20], contained by bigPoly20Comp
S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
+ << LatLng(15.0, 15.0)
+ << LatLng(15.0, 25.0)));
ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
ASSERT(bigPoly20Comp.Contains(poly10Contained));
@@ -188,7 +201,8 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
// which is not allowed by S2.
S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
@@ -197,7 +211,8 @@ TEST(BigSimplePolygon, BasicComplement) {
TEST(BigSimplePolygon, BasicIntersects) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20.Invert();
// A 10x10 square centered at [10,10] (partial overlap)
@@ -212,16 +227,19 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// 1. BigPolygon doesn't touch holePoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0)
+ << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
@@ -230,7 +248,8 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 2. BigPolygon intersects holePoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
@@ -239,7 +258,8 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 3. BigPolygon contains holePoly
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
@@ -261,10 +281,12 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -273,7 +295,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 1. BigPolygon doesn't touch shellPoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0)
+ << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
@@ -282,7 +305,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 2. BigPolygon intersects shellPoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
@@ -291,7 +315,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 3. BigPolygon contains shellPoly's outer ring
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
@@ -309,7 +334,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 5. BigPolygon contain shellPoly (CW)
BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
+ << LatLng(-6.0, 8.0)
+ << LatLng(-6.0, 6.0)));
ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
@@ -318,11 +344,13 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
TEST(BigSimplePolygon, BasicWinding) {
// A 20x20 square centered at [0,0] (CCW)
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Everything *not* in a 20x20 square centered at [0,0] (CW)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(10.0, -10.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
@@ -331,11 +359,13 @@ TEST(BigSimplePolygon, BasicWinding) {
TEST(BigSimplePolygon, LineRelations) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT(bigPoly20.Contains(line10));
@@ -355,12 +385,14 @@ TEST(BigSimplePolygon, LineRelations) {
TEST(BigSimplePolygon, LineRelationsComplement) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -373,7 +405,8 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
// A 10x10 line circling [0,0]
S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
ASSERT_TRUE(bigPoly20Comp.Contains(line30));
ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
}
@@ -381,11 +414,13 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
TEST(BigSimplePolygon, LineRelationsWinding) {
// Everything *not* in a 20x20 square centered at [0,0] (CW winding)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(10.0, -10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -395,11 +430,13 @@ TEST(BigSimplePolygon, LineRelationsWinding) {
TEST(BigSimplePolygon, PolarContains) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0]
S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
+ << LatLng(85.0, 180.0)
+ << LatLng(85.0, -90.0)));
ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
@@ -410,7 +447,8 @@ TEST(BigSimplePolygon, PolarContains) {
TEST(BigSimplePolygon, PolarContainsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
// north pole
@@ -429,7 +467,8 @@ TEST(BigSimplePolygon, PolarContainsWithHoles) {
TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
vector<S2Loop*> loops;
@@ -472,7 +511,8 @@ void checkConsistency(const BigSimplePolygon& bigPoly,
TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Vertex point and collinear point
@@ -481,10 +521,12 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Polygon shares one edge
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
+ << LatLng(-80.0, -10.0)
+ << LatLng(80.0, -10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
+ << LatLng(-50.0, -10.0)
+ << LatLng(50.0, -10.0)));
// Line
S2Polyline line(
@@ -495,9 +537,12 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0)
+ << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, point);
@@ -525,15 +570,18 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Polygon
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
+ << LatLng(-80.0, 10.0)
+ << LatLng(80.0, 10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
+ << LatLng(-50.0, 10.0)
+ << LatLng(50.0, 10.0)));
// Line
S2Polyline line(
pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
@@ -543,9 +591,12 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0)
+ << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, poly);
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 12831f640bc..7e58a768aa8 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/geo/geoparser.h"
+#include <cmath>
#include <string>
#include <vector>
-#include <cmath>
#include "mongo/db/geo/shapes.h"
#include "mongo/db/jsobj.h"
@@ -227,7 +227,8 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Secondary loops not contained by first exterior loop - "
"secondary loops must be holes: "
<< coordinateElt.toString(false)
- << " first loop: " << elem.Obj().firstElement().toString(false));
+ << " first loop: "
+ << elem.Obj().firstElement().toString(false));
}
}
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index a8fc1397659..4f90986d3c1 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -30,13 +30,13 @@
* This file contains tests for mongo/db/geo/geoparser.cpp.
*/
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/shapes.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
@@ -148,16 +148,14 @@ TEST(GeoParser, parseGeoJSONPolygon) {
&polygon));
// And one with a hole.
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
false,
&polygon));
// Latitudes must be OK
ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
false,
&polygon));
// First point must be the same as the last.
@@ -165,9 +163,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
fromjson("{'type':'Polygon', 'coordinates':[ [[1,2],[3,4],[5,6]] ]}"), false, &polygon));
// Extra elements are allowed
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
- " [0,5],[0,0]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
+ " [0,5],[0,0]] ]}"),
false,
&polygon));
@@ -185,9 +182,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
PolygonWithCRS polygonB;
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
false,
&polygonB));
// We removed this in the hole.
@@ -204,9 +200,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
PolygonWithCRS polygonD;
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
false,
&polygonD));
// Also removed in the loop.
@@ -324,31 +319,28 @@ TEST(GeoParser, parseMultiLine) {
mongo::MultiLineWithCRS ml;
ASSERT_OK(GeoParser::parseMultiLine(
- fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
- "[[4,5],[6,7]]]}"),
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
+ "[[4,5],[6,7]]]}"),
false,
&ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
- ASSERT_OK(
- GeoParser::parseMultiLine(fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[4,5],[6,7]]]}"),
- false,
- &ml));
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[4,5],[6,7]]]}"),
+ false,
+ &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
ASSERT_OK(GeoParser::parseMultiLine(
fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]]]}"), false, &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)1);
- ASSERT_OK(
- GeoParser::parseMultiLine(fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[2,2],[1,1]]]}"),
- false,
- &ml));
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[2,2],[1,1]]]}"),
+ false,
+ &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
ASSERT_NOT_OK(GeoParser::parseMultiLine(
@@ -365,22 +357,20 @@ TEST(GeoParser, parseMultiPolygon) {
mongo::MultiPolygonWithCRS mp;
ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson(
- "{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"),
+ fromjson("{'type':'MultiPolygon','coordinates':["
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
false,
&mp));
ASSERT_EQUALS(mp.polygons.size(), (size_t)2);
ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson(
- "{'type':'MultiPolygon','coordinates':["
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"),
+ fromjson("{'type':'MultiPolygon','coordinates':["
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
false,
&mp));
ASSERT_EQUALS(mp.polygons.size(), (size_t)1);
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index 9b96a98a957..affc42e7fba 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -26,11 +26,11 @@
* it in the license file.
*/
+#include "mongo/db/geo/hash.h"
#include "mongo/config.h"
#include "mongo/db/field_parser.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h"
+#include "mongo/db/jsobj.h"
#include "mongo/util/mongoutils/str.h"
#include <algorithm> // for max()
@@ -669,13 +669,19 @@ Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
if (params->bits < 1 || params->bits > 32) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but " << params->bits << " bits were specified");
+ << "but "
+ << params->bits
+ << " bits were specified");
}
if (params->min >= params->max) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "region for hash must be valid and have positive area, "
- << "but [" << params->min << ", " << params->max << "] "
+ << "but ["
+ << params->min
+ << ", "
+ << params->max
+ << "] "
<< "was specified");
}
@@ -770,7 +776,8 @@ GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
GeoHash GeoHashConverter::hash(double x, double y) const {
uassert(16433,
str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
- << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
+ << " ]"
+ << causedBy(BSON_ARRAY(x << y).toString()),
x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
diff --git a/src/mongo/db/geo/hash.h b/src/mongo/db/geo/hash.h
index 0fbbaa2ac38..f772145ede8 100644
--- a/src/mongo/db/geo/hash.h
+++ b/src/mongo/db/geo/hash.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/platform/basic.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index bfe6050fd9d..23aed0a6d93 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -30,12 +30,12 @@
* This file contains tests for mongo/db/geo/hash.cpp.
*/
+#include <algorithm> // For max()
#include <bitset>
-#include <string>
-#include <sstream>
-#include <iomanip>
#include <cmath>
-#include <algorithm> // For max()
+#include <iomanip>
+#include <sstream>
+#include <string>
#include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h"
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index e9cbc789833..c593498e683 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -32,8 +32,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/geo/shapes.h"
#include "mongo/db/geo/r2_region_coverer.h"
+#include "mongo/db/geo/shapes.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp
index ba487d231a9..69b6abba563 100644
--- a/src/mongo/db/geo/r2_region_coverer_test.cpp
+++ b/src/mongo/db/geo/r2_region_coverer_test.cpp
@@ -34,10 +34,10 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/base/init.h"
-#include "mongo/unittest/unittest.h"
-#include "mongo/platform/random.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/geo/geometry_container.h"
+#include "mongo/platform/random.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
namespace {
@@ -278,7 +278,8 @@ GeometryContainer* getRandomCircle(double radius) {
container->parseFromQuery(
BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
<< randDouble(radius, MAXBOUND - radius))
- << radius)).firstElement());
+ << radius))
+ .firstElement());
return container;
}
diff --git a/src/mongo/db/geo/shapes.cpp b/src/mongo/db/geo/shapes.cpp
index fa6018877bb..5ccc0fb85d4 100644
--- a/src/mongo/db/geo/shapes.cpp
+++ b/src/mongo/db/geo/shapes.cpp
@@ -26,8 +26,8 @@
* it in the license file.
*/
-#include "mongo/db/jsobj.h"
#include "mongo/db/geo/shapes.h"
+#include "mongo/db/jsobj.h"
#include "mongo/util/mongoutils/str.h"
using std::abs;
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index 3d8863ff964..7c610d94bf8 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -33,9 +33,9 @@
#include <vector>
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/jsobj.h"
#include "mongo/db/geo/big_polygon.h"
#include "mongo/db/geo/s2.h"
+#include "mongo/db/jsobj.h"
#include "third_party/s2/s2cap.h"
#include "third_party/s2/s2cell.h"
#include "third_party/s2/s2latlng.h"
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index a837126bebd..874181a3100 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -30,10 +30,10 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/bsontypes.h"
#include "mongo/db/hasher.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/bson/bsontypes.h"
#include "mongo/unittest/unittest.h"
@@ -271,7 +271,8 @@ TEST(BSONElementHasher, HashString) {
TEST(BSONElementHasher, HashObject) {
BSONObj o = BSON("check" << BSON("a"
<< "abc"
- << "b" << 123LL));
+ << "b"
+ << 123LL));
ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
o = BSON("check" << BSONObj());
diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp
index c4e5e6e8843..ed63659593a 100644
--- a/src/mongo/db/index/2d_access_method.cpp
+++ b/src/mongo/db/index/2d_access_method.cpp
@@ -31,10 +31,10 @@
#include <string>
#include <vector>
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h
index 5873514c01c..ed5389d5f79 100644
--- a/src/mongo/db/index/btree_access_method.h
+++ b/src/mongo/db/index/btree_access_method.h
@@ -30,9 +30,9 @@
#include "mongo/base/status.h"
-#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index 1f1eb949b06..5e847e77438 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -243,8 +243,10 @@ BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj& obj,
uassert(16746,
mongoutils::str::stream()
<< "Ambiguous field name found in array (do not use numeric field names in "
- "embedded elements in an array), field: '" << arrField.fieldName()
- << "' for array: " << positionalInfo.arrayObj,
+ "embedded elements in an array), field: '"
+ << arrField.fieldName()
+ << "' for array: "
+ << positionalInfo.arrayObj,
!haveObjField || !positionalInfo.hasPositionallyIndexedElt());
*arrayNestedArray = false;
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 99f2b889c01..9afc653127f 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -37,9 +37,9 @@
#include "mongo/db/geo/geometry_container.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/s2.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/query/collation/collation_index_key.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h
index b84acbcae11..5206f4a6768 100644
--- a/src/mongo/db/index/expression_keys_private.h
+++ b/src/mongo/db/index/expression_keys_private.h
@@ -30,8 +30,8 @@
#include <vector>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/hasher.h"
namespace mongo {
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index e0ab49c303e..956a99e6f69 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -31,9 +31,9 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/hasher.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/util/mongoutils/str.h"
#include "third_party/s2/s2.h"
@@ -192,8 +192,14 @@ void ExpressionParams::initialize2dsphereParams(const BSONObj& infoObj,
massert(17395,
stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
- << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
- << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
+ << out->indexVersion
+ << " }, only support versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "]",
out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
out->indexVersion == S2_INDEX_VERSION_1);
}
diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h
index 21cf7c298c0..d8a12323abc 100644
--- a/src/mongo/db/index/expression_params.h
+++ b/src/mongo/db/index/expression_params.h
@@ -31,8 +31,8 @@
#include <string>
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/hasher.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/external_key_generator.cpp b/src/mongo/db/index/external_key_generator.cpp
index de1aec11d64..1ab9c1ad9ae 100644
--- a/src/mongo/db/index/external_key_generator.cpp
+++ b/src/mongo/db/index/external_key_generator.cpp
@@ -32,12 +32,12 @@
#include <string>
#include "mongo/db/fts/fts_spec.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index 10339f7eae9..34f4323fede 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -26,10 +26,10 @@
* it in the license file.
*/
+#include "mongo/db/index/hash_access_method.h"
#include "mongo/db/hasher.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
-#include "mongo/db/index/hash_access_method.h"
namespace mongo {
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 8fc5db36636..e73fc2c623e 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -32,8 +32,8 @@
#include "mongo/base/status.h"
#include "mongo/db/hasher.h" // For HashSeed.
-#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 2abb95870d3..d3ca3c0c808 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/index/btree_access_method.h"
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
@@ -110,8 +110,8 @@ IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
// Ignore this error if we're on a secondary or if the user requested it
- const auto canAcceptWritesForNs = repl::ReplicationCoordinator::get(txn)
- ->canAcceptWritesFor(NamespaceString(_btreeState->ns()));
+ const auto canAcceptWritesForNs = repl::ReplicationCoordinator::get(txn)->canAcceptWritesFor(
+ NamespaceString(_btreeState->ns()));
return !canAcceptWritesForNs || !failIndexKeyTooLong;
}
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 3bb46d78a57..cead1965b9e 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -32,9 +32,9 @@
#include <string>
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index a05f72855dc..eee92fa8037 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -33,11 +33,11 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/geoconstants.h"
-#include "mongo/db/index_names.h"
+#include "mongo/db/geo/geoparser.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
#include "mongo/util/log.h"
@@ -95,18 +95,30 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
if (!indexVersionElt.isNumber()) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
if (indexVersionElt.type() == BSONType::NumberDouble &&
!std::isnormal(indexVersionElt.numberDouble())) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
const auto indexVersion = indexVersionElt.numberLong();
@@ -114,9 +126,15 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
indexVersion != S2_INDEX_VERSION_3) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
return specObj;
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index a26a43337e8..1002ea1ecde 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/index/s2_common.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index/s2_common.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/unittest/unittest.h"
@@ -67,7 +67,8 @@ bool assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actual
long long getCellID(int x, int y) {
BSONObj obj = BSON("a" << BSON("type"
<< "Point"
- << "coordinates" << BSON_ARRAY(x << y)));
+ << "coordinates"
+ << BSON_ARRAY(x << y)));
BSONObj keyPattern = fromjson("{a: '2dsphere'}");
BSONObj infoObj = fromjson("{key: {a: '2dsphere'}, '2dsphereIndexVersion': 3}");
S2IndexingParams params;
@@ -109,7 +110,8 @@ TEST(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) {
BSONObjSet expectedKeys;
expectedKeys.insert(BSON(""
<< "gnirts"
- << "" << getCellID(0, 0)));
+ << ""
+ << getCellID(0, 0)));
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
}
@@ -127,7 +129,9 @@ TEST(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) {
BSONObjSet expectedKeys;
expectedKeys.insert(BSON(""
<< "gnirts"
- << "" << getCellID(0, 0) << ""
+ << ""
+ << getCellID(0, 0)
+ << ""
<< "2gnirts"));
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index cbc90157404..ed9e82a6edb 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -43,8 +43,8 @@
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index c9a313491f8..acd16f3577d 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -38,9 +38,9 @@
#include <signal.h>
#ifndef _WIN32
-#include <syslog.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <syslog.h>
#endif
#include "mongo/base/init.h"
@@ -51,8 +51,8 @@
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/auth/security_key.h"
#include "mongo/db/server_options.h"
-#include "mongo/logger/logger.h"
#include "mongo/logger/console_appender.h"
+#include "mongo/logger/logger.h"
#include "mongo/logger/message_event.h"
#include "mongo/logger/message_event_utf8_encoder.h"
#include "mongo/logger/ramlog.h"
@@ -66,8 +66,8 @@
#include "mongo/util/net/listen.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/processinfo.h"
-#include "mongo/util/signal_handlers_synchronous.h"
#include "mongo/util/quick_exit.h"
+#include "mongo/util/signal_handlers_synchronous.h"
namespace fs = boost::filesystem;
@@ -200,7 +200,8 @@ void forkServerOrDie() {
MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
("GlobalLogManager", "EndStartupOptionHandling", "ForkServer"),
- ("default"))(InitializerContext*) {
+ ("default"))
+(InitializerContext*) {
using logger::LogManager;
using logger::MessageEventEphemeral;
using logger::MessageEventDetailsEncoder;
@@ -230,8 +231,9 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
#endif // defined(_WIN32)
} else if (!serverGlobalParams.logpath.empty()) {
fassert(16448, !serverGlobalParams.logWithSyslog);
- std::string absoluteLogpath = boost::filesystem::absolute(serverGlobalParams.logpath,
- serverGlobalParams.cwd).string();
+ std::string absoluteLogpath =
+ boost::filesystem::absolute(serverGlobalParams.logpath, serverGlobalParams.cwd)
+ .string();
bool exists;
@@ -240,15 +242,16 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
} catch (boost::filesystem::filesystem_error& e) {
return Status(ErrorCodes::FileNotOpen,
mongoutils::str::stream() << "Failed probe for \"" << absoluteLogpath
- << "\": " << e.code().message());
+ << "\": "
+ << e.code().message());
}
if (exists) {
if (boost::filesystem::is_directory(absoluteLogpath)) {
- return Status(ErrorCodes::FileNotOpen,
- mongoutils::str::stream()
- << "logpath \"" << absoluteLogpath
- << "\" should name a file, not a directory.");
+ return Status(
+ ErrorCodes::FileNotOpen,
+ mongoutils::str::stream() << "logpath \"" << absoluteLogpath
+ << "\" should name a file, not a directory.");
}
if (!serverGlobalParams.logAppend && boost::filesystem::is_regular(absoluteLogpath)) {
@@ -260,7 +263,9 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
return Status(ErrorCodes::FileRenameFailed,
mongoutils::str::stream()
<< "Could not rename preexisting log file \""
- << absoluteLogpath << "\" to \"" << renameTarget
+ << absoluteLogpath
+ << "\" to \""
+ << renameTarget
<< "\"; run with --logappend or manually remove file: "
<< errnoWithDescription());
}
@@ -362,7 +367,9 @@ bool initializeServerGlobalState() {
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << "MONGODB-X509"
+ << saslCommandUserDBFieldName
+ << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName));
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 386b925e653..bae59911976 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/authz_manager_external_state_d.h"
-#include "mongo/db/client.h"
#include "mongo/db/catalog/cursor_manager.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/fsync.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -370,8 +370,8 @@ void receivedQuery(OperationContext* txn,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, NamespaceString(sce.getns()), sce.getVersionReceived());
}
dbResponse.response.reset();
@@ -651,9 +651,12 @@ void assembleResponse(OperationContext* txn,
const ShardedConnectionInfo* connInfo = ShardedConnectionInfo::get(&c, false);
uassert(18663,
str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: " << nsString.ns()
- << ", op: " << networkOpToString(op)
- << ", remote: " << remote.toString(),
+ << "versioned connections, ns: "
+ << nsString.ns()
+ << ", op: "
+ << networkOpToString(op)
+ << ", remote: "
+ << remote.toString(),
connInfo == NULL);
}
diff --git a/src/mongo/db/jsobj.h b/src/mongo/db/jsobj.h
index 1135b34aa5d..4691f7c157f 100644
--- a/src/mongo/db/jsobj.h
+++ b/src/mongo/db/jsobj.h
@@ -42,13 +42,13 @@
#include "mongo/platform/basic.h"
-#include "mongo/bson/util/builder.h"
-#include "mongo/bson/timestamp.h"
-#include "mongo/bson/bsontypes.h"
-#include "mongo/bson/oid.h"
+#include "mongo/base/string_data.h"
#include "mongo/bson/bsonelement.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/bsontypes.h"
+#include "mongo/bson/oid.h"
#include "mongo/bson/ordering.h"
-#include "mongo/base/string_data.h"
+#include "mongo/bson/timestamp.h"
+#include "mongo/bson/util/builder.h"
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 9ef77deb639..2c1bf09497d 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -69,7 +69,8 @@ BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusi
BSONElement patElt = pat.next();
massert(16634,
str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern " << _pattern,
+ << " do not match those of keyPattern "
+ << _pattern,
str::equals(srcElt.fieldName(), patElt.fieldName()));
newBound.append(srcElt);
}
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 300843ecfea..4b83648fa7d 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -122,10 +122,12 @@ TEST(KeyPattern, GlobalMinMax) {
BSON("a" << MAXKEY << "b" << MINKEY));
ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed")).globalMin(),
+ << "hashed"))
+ .globalMin(),
BSON("a" << MINKEY));
ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed")).globalMax(),
+ << "hashed"))
+ .globalMax(),
BSON("a" << MAXKEY));
//
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 187050c5eb5..76ff3261dbb 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/matcher/expression.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index ad10d17621c..515e25ab5e1 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -35,8 +35,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/matcher/matchable.h"
#include "mongo/db/matcher/match_details.h"
+#include "mongo/db/matcher/matchable.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index 73d3a5b8b88..95e7bcd62fc 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -73,9 +73,10 @@ TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
// an Undefined type.
BSONObj undefined = fromjson("{a: undefined}");
const CollatorInterface* collator = nullptr;
- ASSERT_EQUALS(ErrorCodes::BadValue,
- MatchExpressionParser::parse(
- undefined, ExtensionsCallbackDisallowExtensions(), collator).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::BadValue,
+ MatchExpressionParser::parse(undefined, ExtensionsCallbackDisallowExtensions(), collator)
+ .getStatus());
ParsedMatchExpression empty("{}");
ParsedMatchExpression null("{a: null}");
@@ -839,11 +840,11 @@ TEST(SplitMatchExpression, ComplexMatchExpressionSplitsCorrectly) {
splitExpr.second->serialize(&secondBob);
ASSERT_EQUALS(firstBob.obj(), fromjson("{$or: [{'a.b': {$eq: 3}}, {'a.b.c': {$eq: 4}}]}"));
- ASSERT_EQUALS(secondBob.obj(),
- fromjson(
- "{$and: [{$nor: [{$and: [{x: {$size: 2}}]}]}, {$nor: [{x: {$gt: 4}}, {$and: "
- "[{$nor: [{$and: [{x: "
- "{$eq: 1}}]}]}, {y: {$eq: 3}}]}]}]}"));
+ ASSERT_EQUALS(
+ secondBob.obj(),
+ fromjson("{$and: [{$nor: [{$and: [{x: {$size: 2}}]}]}, {$nor: [{x: {$gt: 4}}, {$and: "
+ "[{$nor: [{$and: [{x: "
+ "{$eq: 1}}]}]}, {y: {$eq: 3}}]}]}]}"));
}
TEST(MapOverMatchExpression, DoesMapOverLogicalNodes) {
@@ -909,9 +910,9 @@ TEST(MapOverMatchExpression, DoesMapOverNodesWithMultipleChildren) {
ASSERT_OK(swMatchExpression.getStatus());
size_t nodeCount = 0;
- expression::mapOver(swMatchExpression.getValue().get(),
- [&nodeCount](MatchExpression* expression, std::string path)
- -> void { ++nodeCount; });
+ expression::mapOver(
+ swMatchExpression.getValue().get(),
+ [&nodeCount](MatchExpression* expression, std::string path) -> void { ++nodeCount; });
ASSERT_EQ(nodeCount, 3U);
}
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index a51f6c7ade5..0ade4faeab0 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -33,8 +33,8 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_leaf.h"
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 222bece16a4..9970384577b 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -30,11 +30,11 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
-#include "mongo/platform/basic.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/geo/geoparser.h"
-#include "mongo/util/mongoutils/str.h"
+#include "mongo/platform/basic.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -133,8 +133,8 @@ Status GeoExpression::parseFrom(const BSONObj& obj) {
if (GeoExpression::INTERSECT == predicate) {
if (!geoContainer->supportsProject(SPHERE)) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "$geoIntersect not supported with provided geometry: " << obj);
+ str::stream() << "$geoIntersect not supported with provided geometry: "
+ << obj);
}
geoContainer->projectInto(SPHERE);
}
@@ -219,7 +219,8 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "geo near accepts just one argument when querying for a GeoJSON "
- << "point. Extra field found: " << objIt.next());
+ << "point. Extra field found: "
+ << objIt.next());
}
// Parse "new" near:
@@ -231,8 +232,8 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(e.getGtLtOp());
if (BSONObj::opNEAR != matchType) {
return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "invalid geo near query operator: " << e.fieldName());
+ mongoutils::str::stream() << "invalid geo near query operator: "
+ << e.fieldName());
}
// Iterate over the argument.
@@ -247,7 +248,9 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "invalid point in geo near query $geometry argument: "
- << embeddedObj << " " << status.reason());
+ << embeddedObj
+ << " "
+ << status.reason());
}
uassert(16681,
"$near requires geojson point, given " + embeddedObj.toString(),
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 0bc96f33e7f..52ed8ac77e1 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/db/matcher/matcher.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_geo.h"
+#include "mongo/db/matcher/matcher.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 04529e40e48..26a3e7e4846 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -31,11 +31,11 @@
#include "mongo/db/matcher/expression_leaf.h"
#include <cmath>
-#include <unordered_map>
#include <pcrecpp.h>
+#include <unordered_map>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/config.h"
#include "mongo/db/field_ref.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 21d0b63365d..bb7b1f54c2f 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -32,8 +32,8 @@
#include <unordered_map>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index a1a1fe6cb7d..5abec471bcb 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index e0b3b86fd62..97af471a317 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -358,8 +358,8 @@ StatusWithMatchExpression MatchExpressionParser::_parse(const BSONObj& obj, int
root->add(eq.release());
} else {
return {Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "unknown top level operator: " << e.fieldName())};
+ mongoutils::str::stream() << "unknown top level operator: "
+ << e.fieldName())};
}
continue;
diff --git a/src/mongo/db/matcher/expression_parser.h b/src/mongo/db/matcher/expression_parser.h
index 823412ecc73..3c150ef9c9d 100644
--- a/src/mongo/db/matcher/expression_parser.h
+++ b/src/mongo/db/matcher/expression_parser.h
@@ -160,6 +160,7 @@ private:
};
typedef stdx::function<StatusWithMatchExpression(
- const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback;
+ const char* name, int type, const BSONObj& section)>
+ MatchExpressionParserGeoCallback;
extern MatchExpressionParserGeoCallback expressionParserGeoCallback;
}
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index e37f6e8adc6..2a9d0467ce3 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -214,12 +214,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
@@ -237,12 +241,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
@@ -261,11 +269,17 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
const CollatorInterface* collator = nullptr;
@@ -278,10 +292,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Query with DBRef fields out of order.
@@ -289,16 +307,22 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
BSONObj matchOutOfOrder = BSON("$db"
<< "db"
- << "$id" << oid << "$ref"
+ << "$id"
+ << oid
+ << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
@@ -318,13 +342,19 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
const CollatorInterface* collator = nullptr;
@@ -337,10 +367,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $id missing.
@@ -348,13 +382,20 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchMissingID = BSON("$ref"
<< "coll"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj notMatch = BSON("$ref"
<< "collx"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
const CollatorInterface* collator = nullptr;
@@ -367,10 +408,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $ref missing.
@@ -378,12 +423,18 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
const CollatorInterface* collator = nullptr;
@@ -396,10 +447,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $db only.
@@ -407,17 +462,24 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj matchDBOnly = BSON("$db"
<< "db"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
const CollatorInterface* collator = nullptr;
@@ -430,12 +492,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "$db"
- << "db"
- << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "$db"
+ << "db"
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
TEST(MatchExpressionParserArrayTest, All1) {
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index b0897f8468e..d644a2c9e31 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -446,7 +446,9 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
@@ -456,11 +458,15 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -476,28 +482,39 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
<< "db"
<< "$ref"
<< "coll"
- << "$id" << oid))));
+ << "$id"
+ << oid))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
}
@@ -506,11 +523,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidy = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
@@ -520,11 +541,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -532,11 +557,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
<< "coll"
@@ -544,7 +573,9 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
<< "colly"
@@ -552,59 +583,87 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
}
@@ -612,7 +671,10 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345))));
+ << "$id"
+ << oid
+ << "foo"
+ << 12345))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
@@ -621,19 +683,28 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345)))));
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "collx"
- << "$id" << oidx << "foo" << 12345)
- << BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345)))));
}
TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
@@ -648,7 +719,8 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// second field is not $id
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$foo" << 1))));
+ << "$foo"
+ << 1))));
result = MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
ASSERT_FALSE(result.isOK());
@@ -662,7 +734,8 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// missing $id and $ref field
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
<< "test"
- << "foo" << 3))));
+ << "foo"
+ << 3))));
result = MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
ASSERT_FALSE(result.isOK());
}
@@ -1011,20 +1084,25 @@ TEST(MatchExpressionParserLeafTest, TypeBadString) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: {}}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(fromjson("{a: {$type: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: []}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserLeafTest, TypeStringnameDouble) {
@@ -1198,59 +1276,75 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidMask) {
const CollatorInterface* collator = nullptr;
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionValidArray) {
@@ -1263,63 +1357,79 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidArray) {
const CollatorInterface* collator = nullptr;
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionValidBinData) {
@@ -1328,94 +1438,117 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidBinData) {
MatchExpressionParser::parse(
fromjson("{a: {$bitsAllSet: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllClear: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson("{a: {$bitsAnySet: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnyClear: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskType) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAllClear: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAnySet: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAnyClear: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskValue) {
@@ -1424,296 +1557,376 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskValue) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidArray) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllSet: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllClear: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnySet: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnyClear: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidArrayValue) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
}
diff --git a/src/mongo/db/matcher/expression_serialization_test.cpp b/src/mongo/db/matcher/expression_serialization_test.cpp
index 3a764e420cd..ce321f326d7 100644
--- a/src/mongo/db/matcher/expression_serialization_test.cpp
+++ b/src/mongo/db/matcher/expression_serialization_test.cpp
@@ -666,19 +666,18 @@ TEST(SerializeBasic, ExpressionNotWithRegexValueAndOptionsSerializesCorrectly) {
TEST(SerializeBasic, ExpressionNotWithGeoSerializesCorrectly) {
const CollatorInterface* collator = nullptr;
- Matcher original(fromjson(
- "{x: {$not: {$geoIntersects: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0,0], [5,0], "
- "[5, 5], [0, 5], [0, 0]]]}}}}}"),
+ Matcher original(fromjson("{x: {$not: {$geoIntersects: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0,0], [5,0], "
+ "[5, 5], [0, 5], [0, 0]]]}}}}}"),
ExtensionsCallbackNoop(),
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$nor: [{$and: [{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: "
- "[[[0,0], "
- "[5,0], [5, 5], [0, 5], [0, 0]]]}}}}]}]}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{$nor: [{$and: [{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: "
+ "[[[0,0], "
+ "[5,0], [5, 5], [0, 5], [0, 0]]]}}}}]}]}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
@@ -803,10 +802,10 @@ TEST(SerializeBasic, ExpressionGeoWithinSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [10,0], "
- "[10, 10], [0, 10], [0, 0]]]}}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [10,0], "
+ "[10, 10], [0, 10], [0, 0]]]}}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: {type: 'Point', coordinates: [5, 5]}}");
@@ -826,10 +825,10 @@ TEST(SerializeBasic, ExpressionGeoIntersectsSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [5,0], "
- "[5, 5], [0, 5], [0, 0]]]}}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [5,0], "
+ "[5, 5], [0, 5], [0, 0]]]}}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
@@ -849,17 +848,16 @@ TEST(SerializeBasic, ExpressionGeoIntersectsSerializesCorrectly) {
TEST(SerializeBasic, ExpressionNearSerializesCorrectly) {
const CollatorInterface* collator = nullptr;
Matcher original(
- fromjson(
- "{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
- "$minDistance: 1}}}"),
+ fromjson("{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
+ "$minDistance: 1}}}"),
ExtensionsCallbackNoop(),
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
- "$minDistance: 1}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
+ "$minDistance: 1}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -874,9 +872,8 @@ TEST(SerializeBasic, ExpressionNearSphereSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
- "$maxDistance: 10, $minDistance: 1}}}"));
+ fromjson("{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
+ "$maxDistance: 10, $minDistance: 1}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -888,9 +885,8 @@ TEST(SerializeBasic, ExpressionTextSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
- "$diacriticSensitive: false}}"));
+ fromjson("{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
+ "$diacriticSensitive: false}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -902,9 +898,8 @@ TEST(SerializeBasic, ExpressionTextWithDefaultLanguageSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$text: {$search: 'a', $language: '', $caseSensitive: false, "
- "$diacriticSensitive: false}}"));
+ fromjson("{$text: {$search: 'a', $language: '', $caseSensitive: false, "
+ "$diacriticSensitive: false}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 0a95a6e798d..8c8bd979cf8 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -59,13 +59,15 @@ Status TextMatchExpression::init(OperationContext* txn,
if (!db) {
return {ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns() << "')"};
+ << nss.ns()
+ << "')"};
}
Collection* collection = db->getCollection(nss);
if (!collection) {
return {ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns() << "')"};
+ << nss.ns()
+ << "')"};
}
std::vector<IndexDescriptor*> idxMatches;
collection->getIndexCatalog()->findIndexByType(txn, IndexNames::TEXT, idxMatches);
diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp
index afed1a33e6f..625107d4aca 100644
--- a/src/mongo/db/matcher/expression_text_base.cpp
+++ b/src/mongo/db/matcher/expression_text_base.cpp
@@ -58,8 +58,10 @@ void TextMatchExpressionBase::serialize(BSONObjBuilder* out) const {
const fts::FTSQuery& ftsQuery = getFTSQuery();
out->append("$text",
BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage()
- << "$caseSensitive" << ftsQuery.getCaseSensitive()
- << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive()));
+ << "$caseSensitive"
+ << ftsQuery.getCaseSensitive()
+ << "$diacriticSensitive"
+ << ftsQuery.getDiacriticSensitive()));
}
bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 02da8991465..7cd2f8b2d60 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/matcher/expression_tree.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index 9799f25ccc4..8ede86c0f0d 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression.h"
-#include "mongo/db/matcher/expression_tree.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/matcher/expression_tree.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 63b705f6127..1e24dbc2501 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -34,11 +34,11 @@
#include "mongo/base/init.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/namespace_string.h"
#include "mongo/db/client.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index bb5671ea801..b64ebdd572a 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matchable.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/matcher.cpp b/src/mongo/db/matcher/matcher.cpp
index 6fa7eec44b5..faf0f6f1170 100644
--- a/src/mongo/db/matcher/matcher.cpp
+++ b/src/mongo/db/matcher/matcher.cpp
@@ -31,11 +31,11 @@
#include "mongo/platform/basic.h"
#include "mongo/base/init.h"
+#include "mongo/db/exec/working_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/matcher/path.h"
-#include "mongo/db/exec/working_set.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index 3dd9374faa7..deeb7a75933 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -28,10 +28,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
+#include "mongo/db/matcher/path.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/path_internal.h"
-#include "mongo/db/matcher/path.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 59ceb56e187..32f88c57d51 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -45,9 +45,9 @@
#include "mongo/db/server_options.h"
#include "mongo/db/server_options_helpers.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/logger/console_appender.h"
#include "mongo/logger/message_event_utf8_encoder.h"
+#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/ssl_options.h"
@@ -106,13 +106,13 @@ Status addMongodOptions(moe::OptionSection* options) {
// Way to enable or disable auth in JSON Config
general_options
.addOptionChaining(
- "security.authorization",
- "",
- moe::String,
- "How the database behaves with respect to authorization of clients. "
- "Options are \"disabled\", which means that authorization checks are not "
- "performed, and \"enabled\" which means that a client cannot perform actions it is "
- "not authorized to do.")
+ "security.authorization",
+ "",
+ moe::String,
+ "How the database behaves with respect to authorization of clients. "
+ "Options are \"disabled\", which means that authorization checks are not "
+ "performed, and \"enabled\" which means that a client cannot perform actions it is "
+ "not authorized to do.")
.setSources(moe::SourceYAMLConfig)
.format("(:?disabled)|(:?enabled)", "(disabled/enabled)");
@@ -137,35 +137,35 @@ Status addMongodOptions(moe::OptionSection* options) {
// Diagnostic Options
- general_options.addOptionChaining("diaglog",
- "diaglog",
- moe::Int,
- "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
+ general_options
+ .addOptionChaining(
+ "diaglog", "diaglog", moe::Int, "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
.hidden()
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining("operationProfiling.slowOpThresholdMs",
- "slowms",
- moe::Int,
- "value of slow for profile and console log")
+ general_options
+ .addOptionChaining("operationProfiling.slowOpThresholdMs",
+ "slowms",
+ moe::Int,
+ "value of slow for profile and console log")
.setDefault(moe::Value(100));
general_options.addOptionChaining("profile", "profile", moe::Int, "0=off 1=slow, 2=all")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "operationProfiling.mode", "", moe::String, "(off/slowOp/all)")
+ general_options
+ .addOptionChaining("operationProfiling.mode", "", moe::String, "(off/slowOp/all)")
.setSources(moe::SourceYAMLConfig)
.format("(:?off)|(:?slowOp)|(:?all)", "(off/slowOp/all)");
- general_options.addOptionChaining(
- "cpu", "cpu", moe::Switch, "periodically show cpu and iowait utilization")
+ general_options
+ .addOptionChaining(
+ "cpu", "cpu", moe::Switch, "periodically show cpu and iowait utilization")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining("sysinfo",
- "sysinfo",
- moe::Switch,
- "print some diagnostic system information")
+ general_options
+ .addOptionChaining(
+ "sysinfo", "sysinfo", moe::Switch, "print some diagnostic system information")
.setSources(moe::SourceAllLegacy);
// Storage Options
@@ -201,55 +201,59 @@ Status addMongodOptions(moe::OptionSection* options) {
moe::Switch,
"each database will be stored in a separate directory");
- storage_options.addOptionChaining(
- "storage.queryableBackupMode",
- "queryableBackupMode",
- moe::Switch,
- "enable read-only mode - if true the server will not accept writes.")
+ storage_options
+ .addOptionChaining("storage.queryableBackupMode",
+ "queryableBackupMode",
+ moe::Switch,
+ "enable read-only mode - if true the server will not accept writes.")
.setSources(moe::SourceAll)
.hidden();
- general_options.addOptionChaining(
- "noIndexBuildRetry",
- "noIndexBuildRetry",
- moe::Switch,
- "don't retry any index builds that were interrupted by shutdown")
+ general_options
+ .addOptionChaining("noIndexBuildRetry",
+ "noIndexBuildRetry",
+ moe::Switch,
+ "don't retry any index builds that were interrupted by shutdown")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "storage.indexBuildRetry",
- "",
- moe::Bool,
- "don't retry any index builds that were interrupted by shutdown")
+ general_options
+ .addOptionChaining("storage.indexBuildRetry",
+ "",
+ moe::Bool,
+ "don't retry any index builds that were interrupted by shutdown")
.setSources(moe::SourceYAMLConfig);
- storage_options.addOptionChaining(
- "noprealloc",
- "noprealloc",
- moe::Switch,
- "disable data file preallocation - will often hurt performance")
+ storage_options
+ .addOptionChaining("noprealloc",
+ "noprealloc",
+ moe::Switch,
+ "disable data file preallocation - will often hurt performance")
.setSources(moe::SourceAllLegacy);
- storage_options.addOptionChaining(
- "storage.mmapv1.preallocDataFiles",
- "",
- moe::Bool,
- "disable data file preallocation - will often hurt performance",
- "storage.preallocDataFiles").setSources(moe::SourceYAMLConfig);
-
- storage_options.addOptionChaining("storage.mmapv1.nsSize",
- "nssize",
- moe::Int,
- ".ns file size (in MB) for new databases",
- "storage.nsSize").setDefault(moe::Value(16));
+ storage_options
+ .addOptionChaining("storage.mmapv1.preallocDataFiles",
+ "",
+ moe::Bool,
+ "disable data file preallocation - will often hurt performance",
+ "storage.preallocDataFiles")
+ .setSources(moe::SourceYAMLConfig);
- storage_options.addOptionChaining(
- "storage.mmapv1.quota.enforced",
- "quota",
- moe::Switch,
- "limits each database to a certain number of files (8 default)",
- "storage.quota.enforced").incompatibleWith("keyFile");
+ storage_options
+ .addOptionChaining("storage.mmapv1.nsSize",
+ "nssize",
+ moe::Int,
+ ".ns file size (in MB) for new databases",
+ "storage.nsSize")
+ .setDefault(moe::Value(16));
+
+ storage_options
+ .addOptionChaining("storage.mmapv1.quota.enforced",
+ "quota",
+ moe::Switch,
+ "limits each database to a certain number of files (8 default)",
+ "storage.quota.enforced")
+ .incompatibleWith("keyFile");
storage_options.addOptionChaining("storage.mmapv1.quota.maxFilesPerDB",
"quotaFiles",
@@ -263,10 +267,11 @@ Status addMongodOptions(moe::OptionSection* options) {
"use a smaller default file size",
"storage.smallFiles");
- storage_options.addOptionChaining("storage.syncPeriodSecs",
- "syncdelay",
- moe::Double,
- "seconds between disk syncs (0=never, but not recommended)")
+ storage_options
+ .addOptionChaining("storage.syncPeriodSecs",
+ "syncdelay",
+ moe::Double,
+ "seconds between disk syncs (0=never, but not recommended)")
.setDefault(moe::Value(60.0));
// Upgrade and repair are disallowed in JSON configs since they trigger very heavyweight
@@ -284,18 +289,19 @@ Status addMongodOptions(moe::OptionSection* options) {
// Javascript Options
- general_options.addOptionChaining(
- "noscripting", "noscripting", moe::Switch, "disable scripting engine")
+ general_options
+ .addOptionChaining("noscripting", "noscripting", moe::Switch, "disable scripting engine")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "security.javascriptEnabled", "", moe::Bool, "Enable javascript execution")
+ general_options
+ .addOptionChaining(
+ "security.javascriptEnabled", "", moe::Bool, "Enable javascript execution")
.setSources(moe::SourceYAMLConfig);
// Query Options
- general_options.addOptionChaining(
- "notablescan", "notablescan", moe::Switch, "do not allow table scans")
+ general_options
+ .addOptionChaining("notablescan", "notablescan", moe::Switch, "do not allow table scans")
.setSources(moe::SourceAllLegacy);
// Journaling Options
@@ -304,10 +310,11 @@ Status addMongodOptions(moe::OptionSection* options) {
storage_options.addOptionChaining("journal", "journal", moe::Switch, "enable journaling")
.setSources(moe::SourceAllLegacy);
- storage_options.addOptionChaining("nojournal",
- "nojournal",
- moe::Switch,
- "disable journaling (journaling is on by default for 64 bit)")
+ storage_options
+ .addOptionChaining("nojournal",
+ "nojournal",
+ moe::Switch,
+ "disable journaling (journaling is on by default for 64 bit)")
.setSources(moe::SourceAllLegacy);
storage_options.addOptionChaining("dur", "dur", moe::Switch, "enable journaling")
@@ -323,14 +330,16 @@ Status addMongodOptions(moe::OptionSection* options) {
.setSources(moe::SourceYAMLConfig);
// Two ways to set durability diagnostic options. durOptions is deprecated
- storage_options.addOptionChaining("storage.mmapv1.journal.debugFlags",
- "journalOptions",
- moe::Int,
- "journal diagnostic options",
- "storage.journal.debugFlags").incompatibleWith("durOptions");
-
- storage_options.addOptionChaining(
- "durOptions", "durOptions", moe::Int, "durability diagnostic options")
+ storage_options
+ .addOptionChaining("storage.mmapv1.journal.debugFlags",
+ "journalOptions",
+ moe::Int,
+ "journal diagnostic options",
+ "storage.journal.debugFlags")
+ .incompatibleWith("durOptions");
+
+ storage_options
+ .addOptionChaining("durOptions", "durOptions", moe::Int, "durability diagnostic options")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("storage.mmapv1.journal.debugFlags");
@@ -342,10 +351,9 @@ Status addMongodOptions(moe::OptionSection* options) {
"storage.mmapv1.journal.commitIntervalMs");
// Deprecated option that we don't want people to use for performance reasons
- storage_options.addOptionChaining("nopreallocj",
- "nopreallocj",
- moe::Switch,
- "don't preallocate journal files")
+ storage_options
+ .addOptionChaining(
+ "nopreallocj", "nopreallocj", moe::Switch, "don't preallocate journal files")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -367,33 +375,33 @@ Status addMongodOptions(moe::OptionSection* options) {
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining(
- "source", "source", moe::String, "when slave: specify master as <server:port>")
+ ms_options
+ .addOptionChaining(
+ "source", "source", moe::String, "when slave: specify master as <server:port>")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining("only",
- "only",
- moe::String,
- "when slave: specify a single database to replicate")
+ ms_options
+ .addOptionChaining(
+ "only", "only", moe::String, "when slave: specify a single database to replicate")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining(
- "slavedelay",
- "slavedelay",
- moe::Int,
- "specify delay (in seconds) to be used when applying master ops to slave")
+ ms_options
+ .addOptionChaining(
+ "slavedelay",
+ "slavedelay",
+ moe::Int,
+ "specify delay (in seconds) to be used when applying master ops to slave")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining("autoresync",
- "autoresync",
- moe::Switch,
- "automatically resync if slave data is stale")
+ ms_options
+ .addOptionChaining(
+ "autoresync", "autoresync", moe::Switch, "automatically resync if slave data is stale")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
@@ -407,21 +415,22 @@ Status addMongodOptions(moe::OptionSection* options) {
"size to use (in MB) for replication op log. default is 5% of disk space "
"(i.e. large is good)");
- rs_options.addOptionChaining("replication.replSet",
- "replSet",
- moe::String,
- "arg is <setname>[/<optionalseedhostlist>]")
+ rs_options
+ .addOptionChaining("replication.replSet",
+ "replSet",
+ moe::String,
+ "arg is <setname>[/<optionalseedhostlist>]")
.setSources(moe::SourceAllLegacy);
rs_options.addOptionChaining("replication.replSetName", "", moe::String, "arg is <setname>")
.setSources(moe::SourceYAMLConfig)
.format("[^/]+", "[replica set name with no \"/\"]");
- rs_options.addOptionChaining(
- "replication.secondaryIndexPrefetch",
- "replIndexPrefetch",
- moe::String,
- "specify index prefetching behavior (if secondary) [none|_id_only|all]")
+ rs_options
+ .addOptionChaining("replication.secondaryIndexPrefetch",
+ "replIndexPrefetch",
+ moe::String,
+ "specify index prefetching behavior (if secondary) [none|_id_only|all]")
.format("(:?none)|(:?_id_only)|(:?all)", "(none/_id_only/all)");
rs_options.addOptionChaining("replication.enableMajorityReadConcern",
@@ -431,73 +440,73 @@ Status addMongodOptions(moe::OptionSection* options) {
// Sharding Options
- sharding_options.addOptionChaining(
- "configsvr",
- "configsvr",
- moe::Switch,
- "declare this is a config db of a cluster; default port 27019; "
- "default dir /data/configdb")
+ sharding_options
+ .addOptionChaining("configsvr",
+ "configsvr",
+ moe::Switch,
+ "declare this is a config db of a cluster; default port 27019; "
+ "default dir /data/configdb")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("shardsvr")
.incompatibleWith("nojournal");
- sharding_options.addOptionChaining(
- "shardsvr",
- "shardsvr",
- moe::Switch,
- "declare this is a shard db of a cluster; default port 27018")
+ sharding_options
+ .addOptionChaining("shardsvr",
+ "shardsvr",
+ moe::Switch,
+ "declare this is a shard db of a cluster; default port 27018")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("configsvr");
sharding_options
.addOptionChaining(
- "sharding.clusterRole",
- "",
- moe::String,
- "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
- " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
- "default."
- " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
- "default.")
+ "sharding.clusterRole",
+ "",
+ moe::String,
+ "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
+ " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
+ "default."
+ " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
+ "default.")
.setSources(moe::SourceYAMLConfig)
.format("(:?configsvr)|(:?shardsvr)", "(configsvr/shardsvr)");
sharding_options
.addOptionChaining(
- "sharding._overrideShardIdentity",
- "",
- moe::String,
- "overrides the shardIdentity document settings stored in the local storage with "
- "a MongoDB Extended JSON document in string format")
+ "sharding._overrideShardIdentity",
+ "",
+ moe::String,
+ "overrides the shardIdentity document settings stored in the local storage with "
+ "a MongoDB Extended JSON document in string format")
.setSources(moe::SourceYAMLConfig)
.incompatibleWith("configsvr")
.requires("storage.queryableBackupMode");
- sharding_options.addOptionChaining(
- "noMoveParanoia",
- "noMoveParanoia",
- moe::Switch,
- "turn off paranoid saving of data for the moveChunk command; default")
+ sharding_options
+ .addOptionChaining("noMoveParanoia",
+ "noMoveParanoia",
+ moe::Switch,
+ "turn off paranoid saving of data for the moveChunk command; default")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("moveParanoia");
- sharding_options.addOptionChaining(
- "moveParanoia",
- "moveParanoia",
- moe::Switch,
- "turn on paranoid saving of data during the moveChunk command "
- "(used for internal system diagnostics)")
+ sharding_options
+ .addOptionChaining("moveParanoia",
+ "moveParanoia",
+ moe::Switch,
+ "turn on paranoid saving of data during the moveChunk command "
+ "(used for internal system diagnostics)")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("noMoveParanoia");
- sharding_options.addOptionChaining(
- "sharding.archiveMovedChunks",
- "",
- moe::Bool,
- "config file option to turn on paranoid saving of data during the "
- "moveChunk command (used for internal system diagnostics)")
+ sharding_options
+ .addOptionChaining("sharding.archiveMovedChunks",
+ "",
+ moe::Bool,
+ "config file option to turn on paranoid saving of data during the "
+ "moveChunk command (used for internal system diagnostics)")
.hidden()
.setSources(moe::SourceYAMLConfig);
@@ -517,18 +526,20 @@ Status addMongodOptions(moe::OptionSection* options) {
// The following are legacy options that are disallowed in the JSON config file
- options->addOptionChaining(
- "fastsync",
- "fastsync",
- moe::Switch,
- "indicate that this instance is starting from a dbpath snapshot of the repl peer")
+ options
+ ->addOptionChaining(
+ "fastsync",
+ "fastsync",
+ moe::Switch,
+ "indicate that this instance is starting from a dbpath snapshot of the repl peer")
.hidden()
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining("pretouch",
- "pretouch",
- moe::Int,
- "n pretouch threads for applying master/slave operations")
+ options
+ ->addOptionChaining("pretouch",
+ "pretouch",
+ moe::Int,
+ "n pretouch threads for applying master/slave operations")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -541,8 +552,8 @@ Status addMongodOptions(moe::OptionSection* options) {
.positional(1, 3)
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "cacheSize", "cacheSize", moe::Long, "cache size (in MB) for rec store")
+ options
+ ->addOptionChaining("cacheSize", "cacheSize", moe::Long, "cache size (in MB) for rec store")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -1203,7 +1214,8 @@ Status storeMongodOptions(const moe::Environment& params, const std::vector<std:
if (x <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "bad --oplogSize, arg must be greater than 0,"
- "found: " << x);
+ "found: "
+ << x);
}
// note a small size such as x==1 is ok for an arbiter.
if (x > 1000 && sizeof(void*) == 4) {
diff --git a/src/mongo/db/op_observer.cpp b/src/mongo/db/op_observer.cpp
index 25bf9866c7f..5910edd37bf 100644
--- a/src/mongo/db/op_observer.cpp
+++ b/src/mongo/db/op_observer.cpp
@@ -216,7 +216,9 @@ void OpObserver::onRenameCollection(OperationContext* txn,
std::string dbName = fromCollection.db().toString() + ".$cmd";
BSONObj cmdObj =
BSON("renameCollection" << fromCollection.ns() << "to" << toCollection.ns() << "stayTemp"
- << stayTemp << "dropTarget" << dropTarget);
+ << stayTemp
+ << "dropTarget"
+ << dropTarget);
repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index f20d0a4eab8..139cfddd869 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -35,9 +35,9 @@
#include "mongo/db/client.h"
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/curop.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h
index 75fdc841e20..92efb01802f 100644
--- a/src/mongo/db/operation_context_noop.h
+++ b/src/mongo/db/operation_context_noop.h
@@ -28,10 +28,10 @@
#pragma once
-#include "mongo/db/operation_context.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/locker_noop.h"
#include "mongo/db/curop.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/recovery_unit_noop.h"
namespace mongo {
diff --git a/src/mongo/db/ops/field_checker.cpp b/src/mongo/db/ops/field_checker.cpp
index 0c71c7e5d07..4e8c8b82de6 100644
--- a/src/mongo/db/ops/field_checker.cpp
+++ b/src/mongo/db/ops/field_checker.cpp
@@ -51,7 +51,8 @@ Status isUpdatable(const FieldRef& field) {
if (part.empty()) {
return Status(ErrorCodes::EmptyFieldName,
mongoutils::str::stream()
- << "The update path '" << field.dottedField()
+ << "The update path '"
+ << field.dottedField()
<< "' contains an empty field name, which is not allowed.");
}
}
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index de198beac53..c7648b0f254 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -42,8 +42,10 @@ StatusWith<BSONObj> fixDocumentForInsert(const BSONObj& doc) {
if (doc.objsize() > BSONObjMaxUserSize)
return StatusWith<BSONObj>(ErrorCodes::BadValue,
str::stream() << "object to insert too large"
- << ". size in bytes: " << doc.objsize()
- << ", max size: " << BSONObjMaxUserSize);
+ << ". size in bytes: "
+ << doc.objsize()
+ << ", max size: "
+ << BSONObjMaxUserSize);
bool firstElementIsId = false;
bool hasTimestampToFix = false;
@@ -162,9 +164,11 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
if (db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen)
return Status(ErrorCodes::BadValue,
- str::stream()
- << "fully qualified namespace " << db << '.' << coll << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)");
+ str::stream() << "fully qualified namespace " << db << '.' << coll
+ << " is too long "
+ << "(max is "
+ << NamespaceString::MaxNsCollectionLen
+ << " bytes)");
// check spceial areas
diff --git a/src/mongo/db/ops/log_builder.cpp b/src/mongo/db/ops/log_builder.cpp
index 21baffe246c..355ccb092e8 100644
--- a/src/mongo/db/ops/log_builder.cpp
+++ b/src/mongo/db/ops/log_builder.cpp
@@ -89,8 +89,10 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson:
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
str::stream() << "Could not create new '" << name
- << "' element from existing element '" << val.getFieldName()
- << "' of type " << typeName(val.getType()));
+ << "' element from existing element '"
+ << val.getFieldName()
+ << "' of type "
+ << typeName(val.getType()));
return addToSets(elemToSet);
}
@@ -100,8 +102,10 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement&
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
str::stream() << "Could not create new '" << name
- << "' element from existing element '" << val.fieldName()
- << "' of type " << typeName(val.type()));
+ << "' element from existing element '"
+ << val.fieldName()
+ << "' of type "
+ << typeName(val.type()));
return addToSets(elemToSet);
}
diff --git a/src/mongo/db/ops/log_builder_test.cpp b/src/mongo/db/ops/log_builder_test.cpp
index f2a3d20aa78..a957194e56f 100644
--- a/src/mongo/db/ops/log_builder_test.cpp
+++ b/src/mongo/db/ops/log_builder_test.cpp
@@ -106,11 +106,10 @@ TEST(LogBuilder, AddOneToEach) {
ASSERT_OK(lb.addToUnsets("x.y"));
- ASSERT_EQUALS(mongo::fromjson(
- "{ "
- " $set : { 'a.b' : 1 }, "
- " $unset : { 'x.y' : true } "
- "}"),
+ ASSERT_EQUALS(mongo::fromjson("{ "
+ " $set : { 'a.b' : 1 }, "
+ " $unset : { 'x.y' : true } "
+ "}"),
doc);
}
@@ -164,11 +163,10 @@ TEST(LogBuilder, VerifySetsAreGrouped) {
ASSERT_TRUE(elt_xy.ok());
ASSERT_OK(lb.addToSets(elt_xy));
- ASSERT_EQUALS(mongo::fromjson(
- "{ $set : {"
- " 'a.b' : 1, "
- " 'x.y' : 1 "
- "} }"),
+ ASSERT_EQUALS(mongo::fromjson("{ $set : {"
+ " 'a.b' : 1, "
+ " 'x.y' : 1 "
+ "} }"),
doc);
}
@@ -179,11 +177,10 @@ TEST(LogBuilder, VerifyUnsetsAreGrouped) {
ASSERT_OK(lb.addToUnsets("a.b"));
ASSERT_OK(lb.addToUnsets("x.y"));
- ASSERT_EQUALS(mongo::fromjson(
- "{ $unset : {"
- " 'a.b' : true, "
- " 'x.y' : true "
- "} }"),
+ ASSERT_EQUALS(mongo::fromjson("{ $unset : {"
+ " 'a.b' : true, "
+ " 'x.y' : true "
+ "} }"),
doc);
}
diff --git a/src/mongo/db/ops/modifier_add_to_set.cpp b/src/mongo/db/ops/modifier_add_to_set.cpp
index 383991c2b1f..ffa9c28ddf4 100644
--- a/src/mongo/db/ops/modifier_add_to_set.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set.cpp
@@ -124,7 +124,8 @@ Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, b
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
// TODO: The driver could potentially do this re-writing.
@@ -247,11 +248,12 @@ Status ModifierAddToSet::prepare(mb::Element root, StringData matchedField, Exec
if (_preparedState->elemFound.getType() != mongo::Array) {
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Cannot apply $addToSet to a non-array field. Field named '"
- << _preparedState->elemFound.getFieldName() << "' has a non-array type "
- << typeName(_preparedState->elemFound.getType()) << " in the document "
- << idElem.toString());
+ str::stream() << "Cannot apply $addToSet to a non-array field. Field named '"
+ << _preparedState->elemFound.getFieldName()
+ << "' has a non-array type "
+ << typeName(_preparedState->elemFound.getType())
+ << " in the document "
+ << idElem.toString());
}
// If the array is empty, then we don't need to check anything: all of the values are
@@ -387,7 +389,8 @@ Status ModifierAddToSet::log(LogBuilder* logBuilder) const {
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Could not append entry for $addToSet oplog entry."
- << "Underlying cause: " << status.toString());
+ << "Underlying cause: "
+ << status.toString());
}
curr = curr.rightSibling();
}
diff --git a/src/mongo/db/ops/modifier_bit.cpp b/src/mongo/db/ops/modifier_bit.cpp
index d6acbfe1ff8..fb7ae3f45eb 100644
--- a/src/mongo/db/ops/modifier_bit.cpp
+++ b/src/mongo/db/ops/modifier_bit.cpp
@@ -84,7 +84,8 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
if (modExpr.type() != mongo::Object)
@@ -120,7 +121,9 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName << "' which is an unknown operator: {" << curOp
+ << payloadFieldName
+ << "' which is an unknown operator: {"
+ << curOp
<< "}");
}
@@ -128,7 +131,9 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type()) << "' is not supported here: {" << curOp
+ << typeName(curOp.type())
+ << "' is not supported here: {"
+ << curOp
<< "}");
const OpEntry entry = {SafeNum(curOp), op};
@@ -191,7 +196,8 @@ Status ModifierBit::prepare(mutablebson::Element root,
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString() << " has the field "
+ << idElem.toString()
+ << " has the field "
<< _preparedState->elemFound.getFieldName()
<< " of non-integer type "
<< typeName(_preparedState->elemFound.getType()));
@@ -260,7 +266,9 @@ Status ModifierBit::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $bit oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->newValue.debugString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_compare.cpp b/src/mongo/db/ops/modifier_compare.cpp
index 36f800202e4..2d05d29f360 100644
--- a/src/mongo/db/ops/modifier_compare.cpp
+++ b/src/mongo/db/ops/modifier_compare.cpp
@@ -77,7 +77,8 @@ Status ModifierCompare::init(const BSONElement& modExpr, const Options& opts, bo
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
+ << _updatePath.dottedField()
+ << "'");
}
// Store value for later.
diff --git a/src/mongo/db/ops/modifier_current_date.cpp b/src/mongo/db/ops/modifier_current_date.cpp
index 75d0be014e3..cd328f5fe94 100644
--- a/src/mongo/db/ops/modifier_current_date.cpp
+++ b/src/mongo/db/ops/modifier_current_date.cpp
@@ -84,7 +84,8 @@ Status ModifierCurrentDate::init(const BSONElement& modExpr,
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
+ << _updatePath.dottedField()
+ << "'");
}
// Validate and store the type to produce
@@ -113,7 +114,8 @@ Status ModifierCurrentDate::init(const BSONElement& modExpr,
str::stream()
<< "The only valid field of the option is '$type': "
"{$currentDate: {field : {$type: 'date/timestamp'}}}; "
- << "arg: " << argObj);
+ << "arg: "
+ << argObj);
}
}
}
diff --git a/src/mongo/db/ops/modifier_inc.cpp b/src/mongo/db/ops/modifier_inc.cpp
index 8bc6e2ff9a4..314ac6a5024 100644
--- a/src/mongo/db/ops/modifier_inc.cpp
+++ b/src/mongo/db/ops/modifier_inc.cpp
@@ -89,7 +89,8 @@ Status ModifierInc::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -101,7 +102,9 @@ Status ModifierInc::init(const BSONElement& modExpr, const Options& opts, bool*
// include mod code, etc.
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot " << (_mode == MODE_INC ? "increment" : "multiply")
- << " with non-numeric argument: {" << modExpr << "}");
+ << " with non-numeric argument: {"
+ << modExpr
+ << "}");
}
_val = modExpr;
@@ -172,7 +175,8 @@ Status ModifierInc::prepare(mutablebson::Element root,
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot apply " << (_mode == MODE_INC ? "$inc" : "$mul")
- << " to a value of non-numeric type. {" << idElem.toString()
+ << " to a value of non-numeric type. {"
+ << idElem.toString()
<< "} has the field '"
<< _preparedState->elemFound.getFieldName()
<< "' of non-numeric type "
@@ -191,8 +195,10 @@ Status ModifierInc::prepare(mutablebson::Element root,
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
return Status(ErrorCodes::BadValue,
str::stream() << "Failed to apply $inc operations to current value ("
- << currentValue.debugString() << ") for document {"
- << idElem.toString() << "}");
+ << currentValue.debugString()
+ << ") for document {"
+ << idElem.toString()
+ << "}");
}
// If the values are identical (same type, same value), then this is a no-op.
@@ -254,8 +260,11 @@ Status ModifierInc::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to "
- << (_mode == MODE_INC ? "$inc" : "$mul") << " oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << (_mode == MODE_INC ? "$inc" : "$mul")
+ << " oplog entry: "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->newValue.debugString());
}
diff --git a/src/mongo/db/ops/modifier_object_replace.cpp b/src/mongo/db/ops/modifier_object_replace.cpp
index 64bec6283d7..0cecd5a0d1e 100644
--- a/src/mongo/db/ops/modifier_object_replace.cpp
+++ b/src/mongo/db/ops/modifier_object_replace.cpp
@@ -86,7 +86,8 @@ Status ModifierObjectReplace::init(const BSONElement& modExpr,
// Impossible, really since the caller check this already...
return Status(ErrorCodes::BadValue,
str::stream() << "Document replacement expects a complete document"
- " but the type supplied was " << modExpr.type());
+ " but the type supplied was "
+ << modExpr.type());
}
// Object replacements never have positional operator.
@@ -150,8 +151,10 @@ Status ModifierObjectReplace::apply() const {
if (srcIdElement.compareWithBSONElement(dstIdElement, true) != 0) {
return Status(ErrorCodes::ImmutableField,
str::stream() << "The _id field cannot be changed from {"
- << srcIdElement.toString() << "} to {"
- << dstIdElement.toString() << "}.");
+ << srcIdElement.toString()
+ << "} to {"
+ << dstIdElement.toString()
+ << "}.");
}
continue;
}
diff --git a/src/mongo/db/ops/modifier_pop.cpp b/src/mongo/db/ops/modifier_pop.cpp
index c46fdd7a9bf..07682c976bf 100644
--- a/src/mongo/db/ops/modifier_pop.cpp
+++ b/src/mongo/db/ops/modifier_pop.cpp
@@ -89,7 +89,8 @@ Status ModifierPop::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -136,9 +137,9 @@ Status ModifierPop::prepare(mutablebson::Element root,
// array.
if (_preparedState->pathFoundElement.getType() != Array) {
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Can only $pop from arrays. {" << idElem.toString()
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Can only $pop from arrays. {" << idElem.toString()
<< "} has the field '"
<< _preparedState->pathFoundElement.getFieldName()
<< "' of non-array type "
@@ -190,7 +191,9 @@ Status ModifierPop::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $pop oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->pathFoundElement.toString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_pop_test.cpp b/src/mongo/db/ops/modifier_pop_test.cpp
index 06dd60a029e..8e288c73aa4 100644
--- a/src/mongo/db/ops/modifier_pop_test.cpp
+++ b/src/mongo/db/ops/modifier_pop_test.cpp
@@ -37,8 +37,8 @@
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/mutable_bson_test_utils.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/ops/log_builder.h"
#include "mongo/db/json.h"
+#include "mongo/db/ops/log_builder.h"
#include "mongo/unittest/unittest.h"
namespace {
diff --git a/src/mongo/db/ops/modifier_pull.cpp b/src/mongo/db/ops/modifier_pull.cpp
index ce87c03e0f3..a172251eea6 100644
--- a/src/mongo/db/ops/modifier_pull.cpp
+++ b/src/mongo/db/ops/modifier_pull.cpp
@@ -94,7 +94,8 @@ Status ModifierPull::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
_exprElt = modExpr;
diff --git a/src/mongo/db/ops/modifier_pull_all.cpp b/src/mongo/db/ops/modifier_pull_all.cpp
index 287dc4828b4..681769fd195 100644
--- a/src/mongo/db/ops/modifier_pull_all.cpp
+++ b/src/mongo/db/ops/modifier_pull_all.cpp
@@ -105,7 +105,8 @@ Status ModifierPullAll::init(const BSONElement& modExpr, const Options& opts, bo
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -155,9 +156,9 @@ Status ModifierPullAll::prepare(mutablebson::Element root,
// array.
if (_preparedState->pathFoundElement.getType() != Array) {
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Can only apply $pullAll to an array. " << idElem.toString()
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Can only apply $pullAll to an array. " << idElem.toString()
<< " has the field "
<< _preparedState->pathFoundElement.getFieldName()
<< " of non-array type "
@@ -227,7 +228,9 @@ Status ModifierPullAll::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $pullAll oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->pathFoundElement.toString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index bf3886cbe24..7dc0f24e712 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -104,9 +104,9 @@ Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
*eachElem = modExpr.embeddedObject()[kEach];
if (eachElem->type() != Array) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The argument to $each in $push must be"
- " an array but it was of type: " << typeName(eachElem->type()));
+ str::stream() << "The argument to $each in $push must be"
+ " an array but it was of type: "
+ << typeName(eachElem->type()));
}
// There must be only one $each clause.
@@ -149,8 +149,8 @@ Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
seenPosition = true;
} else if (!mongoutils::str::equals(elem.fieldName(), kEach)) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Unrecognized clause in $push: " << elem.fieldNameStringData());
+ str::stream() << "Unrecognized clause in $push: "
+ << elem.fieldNameStringData());
}
}
@@ -214,7 +214,8 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -264,7 +265,8 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
if (_pushMode == PUSH_ALL) {
return Status(ErrorCodes::BadValue,
str::stream() << "$pushAll requires an array of values "
- "but was given type: " << typeName(modExpr.type()));
+ "but was given type: "
+ << typeName(modExpr.type()));
}
_val = modExpr;
@@ -379,9 +381,9 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
for (size_t i = 0; i < sortField.numParts(); i++) {
if (sortField.getPart(i).size() == 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The $sort field is a dotted field "
- "but has an empty part: " << sortField.dottedField());
+ str::stream() << "The $sort field is a dotted field "
+ "but has an empty part: "
+ << sortField.dottedField());
}
}
}
@@ -442,7 +444,9 @@ Status ModifierPush::prepare(mutablebson::Element root,
str::stream() << "The field '" << _fieldRef.dottedField() << "'"
<< " must be an array but is of type "
<< typeName(_preparedState->elemFound.getType())
- << " in document {" << idElem.toString() << "}");
+ << " in document {"
+ << idElem.toString()
+ << "}");
}
} else {
return status;
@@ -477,7 +481,8 @@ Status pushFirstElement(mb::Element& arrayElem,
if (!fromElem.ok()) {
return Status(ErrorCodes::InvalidLength,
str::stream() << "The specified position (" << appendPos << "/" << pos
- << ") is invalid based on the length ( " << arraySize
+ << ") is invalid based on the length ( "
+ << arraySize
<< ") of the array");
}
diff --git a/src/mongo/db/ops/modifier_push_sorter.h b/src/mongo/db/ops/modifier_push_sorter.h
index c942f4e5da3..6d795ec372d 100644
--- a/src/mongo/db/ops/modifier_push_sorter.h
+++ b/src/mongo/db/ops/modifier_push_sorter.h
@@ -28,9 +28,9 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/element.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/ops/modifier_push_test.cpp b/src/mongo/db/ops/modifier_push_test.cpp
index e148ce5f6ef..6e37fc24d74 100644
--- a/src/mongo/db/ops/modifier_push_test.cpp
+++ b/src/mongo/db/ops/modifier_push_test.cpp
@@ -35,10 +35,10 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/bson/ordering.h"
#include "mongo/bson/mutable/algorithm.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/mutable_bson_test_utils.h"
+#include "mongo/bson/ordering.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/ops/log_builder.h"
@@ -659,13 +659,13 @@ TEST(SimpleObjMod, PrepareApplyNormal) {
}
TEST(SimpleObjMod, PrepareApplyDotted) {
- Document doc(fromjson(
- "{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b' }, "
- " second : { choice : 'c' } }"
- "}"));
+ Document doc(
+ fromjson("{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b' }, "
+ " second : { choice : 'c' } }"
+ "}"));
Mod pushMod(fromjson("{$push: {'choices.first.votes': 1}}"));
ModifierInterface::ExecInfo execInfo;
@@ -676,13 +676,12 @@ TEST(SimpleObjMod, PrepareApplyDotted) {
ASSERT_OK(pushMod.apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(
- "{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b', votes: [1]}, "
- " second : { choice : 'c' } }"
- "}"),
+ ASSERT_EQUALS(fromjson("{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b', votes: [1]}, "
+ " second : { choice : 'c' } }"
+ "}"),
doc);
Document logDoc;
@@ -1059,8 +1058,9 @@ public:
arrBuilder.append(*it);
}
- _modObj = BSON("$push" << BSON("a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice
- << "$sort" << sort)));
+ _modObj = BSON(
+ "$push" << BSON(
+ "a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice << "$sort" << sort)));
ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
diff --git a/src/mongo/db/ops/modifier_rename.cpp b/src/mongo/db/ops/modifier_rename.cpp
index 26dc2b23df9..c2935fb1906 100644
--- a/src/mongo/db/ops/modifier_rename.cpp
+++ b/src/mongo/db/ops/modifier_rename.cpp
@@ -29,8 +29,8 @@
#include "mongo/db/ops/modifier_rename.h"
#include "mongo/base/error_codes.h"
-#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/algorithm.h"
+#include "mongo/bson/mutable/document.h"
#include "mongo/db/ops/field_checker.h"
#include "mongo/db/ops/log_builder.h"
#include "mongo/db/ops/path_support.h"
@@ -95,15 +95,16 @@ Status ModifierRename::init(const BSONElement& modExpr, const Options& opts, boo
// Old restriction is that if the fields are the same then it is not allowed.
if (_fromFieldRef == _toFieldRef)
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The source and target field for $rename must differ: " << modExpr);
+ str::stream() << "The source and target field for $rename must differ: "
+ << modExpr);
// TODO: Remove this restriction by allowing moving deeping from the 'from' path
// Old restriction is that if the to/from is on the same path it fails
if (_fromFieldRef.isPrefixOf(_toFieldRef) || _toFieldRef.isPrefixOf(_fromFieldRef)) {
return Status(ErrorCodes::BadValue,
str::stream() << "The source and target field for $rename must "
- "not be on the same path: " << modExpr);
+ "not be on the same path: "
+ << modExpr);
}
// TODO: We can remove this restriction as long as there is only one,
// or it is the same array -- should think on this a bit.
@@ -161,9 +162,11 @@ Status ModifierRename::prepare(mutablebson::Element root,
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
+ << _fromFieldRef.dottedField()
+ << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName()
+ << " has an array field called '"
+ << curr.getFieldName()
<< "'");
curr = curr.parent();
}
@@ -191,9 +194,11 @@ Status ModifierRename::prepare(mutablebson::Element root,
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream() << "The destination field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
+ << _fromFieldRef.dottedField()
+ << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName()
+ << " has an array field called '"
+ << curr.getFieldName()
<< "'");
curr = curr.parent();
}
diff --git a/src/mongo/db/ops/modifier_set.cpp b/src/mongo/db/ops/modifier_set.cpp
index c6966fae079..59f59c555ce 100644
--- a/src/mongo/db/ops/modifier_set.cpp
+++ b/src/mongo/db/ops/modifier_set.cpp
@@ -88,7 +88,8 @@ Status ModifierSet::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
diff --git a/src/mongo/db/ops/modifier_unset.cpp b/src/mongo/db/ops/modifier_unset.cpp
index 673cbdb8d16..453b2d60d1c 100644
--- a/src/mongo/db/ops/modifier_unset.cpp
+++ b/src/mongo/db/ops/modifier_unset.cpp
@@ -83,7 +83,8 @@ Status ModifierUnset::init(const BSONElement& modExpr, const Options& opts, bool
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index 4e027390a79..b2f723e455e 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/delete.h"
-#include "mongo/db/ops/delete_request.h"
#include "mongo/db/matcher/extensions_callback_real.h"
+#include "mongo/db/ops/delete_request.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner_common.h"
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index c9bb03edf9a..eabef19b483 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -30,9 +30,9 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
+#include "mongo/db/ops/update_driver.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/ops/update_driver.h"
namespace mongo {
diff --git a/src/mongo/db/ops/path_support.cpp b/src/mongo/db/ops/path_support.cpp
index e430c7d57cc..a95b700acd0 100644
--- a/src/mongo/db/ops/path_support.cpp
+++ b/src/mongo/db/ops/path_support.cpp
@@ -66,7 +66,8 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
if (toPad > kMaxPaddingAllowed) {
return Status(ErrorCodes::CannotBackfillArray,
mongoutils::str::stream() << "can't backfill more than "
- << kMaxPaddingAllowed << " elements");
+ << kMaxPaddingAllowed
+ << " elements");
}
for (size_t i = 0; i < toPad; i++) {
@@ -139,8 +140,10 @@ Status findLongestPrefix(const FieldRef& prefix,
*elemFound = prev;
return Status(ErrorCodes::PathNotViable,
mongoutils::str::stream() << "cannot use the part (" << prefix.getPart(i - 1)
- << " of " << prefix.dottedField()
- << ") to traverse the element ({" << curr.toString()
+ << " of "
+ << prefix.dottedField()
+ << ") to traverse the element ({"
+ << curr.toString()
<< "})");
} else if (curr.ok()) {
*idxFound = i - 1;
diff --git a/src/mongo/db/ops/path_support_test.cpp b/src/mongo/db/ops/path_support_test.cpp
index 01345564ccd..f1de975850f 100644
--- a/src/mongo/db/ops/path_support_test.cpp
+++ b/src/mongo/db/ops/path_support_test.cpp
@@ -537,7 +537,9 @@ static void assertContains(const EqualityMatches& equalities, const BSONObj& wra
}
if (!it->second->getData().valuesEqual(value)) {
FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData() << ", not value " << value);
+ << it->second->getData()
+ << ", not value "
+ << value);
}
}
@@ -827,12 +829,17 @@ static void assertParent(const EqualityMatches& equalities,
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
if (foundParentPath != parentPath) {
FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \"" << parentPath << "\"");
+ << "\" does not match \""
+ << parentPath
+ << "\"");
}
if (!parentEl.valuesEqual(value)) {
FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
- << "\" contains value " << parentEl << ", not value " << value);
+ << "\" contains value "
+ << parentEl
+ << ", not value "
+ << value);
}
}
@@ -852,7 +859,8 @@ static void assertNoParent(const EqualityMatches& equalities, StringData pathStr
if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath << "\"");
+ << foundParentPath
+ << "\"");
}
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index d5b83ae666b..c808d9aab34 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -96,7 +96,8 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating collection "
- << nsString.ns() << " during upsert"));
+ << nsString.ns()
+ << " during upsert"));
}
WriteUnitOfWork wuow(txn);
collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index ff21054e25a..8ff64538a9d 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -30,8 +30,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h"
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index 19abae7b96f..fad283493cd 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -112,7 +112,9 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
str::stream() << "Modifiers operate on fields but we found type "
<< typeName(outerModElem.type())
<< " instead. For example: {$mod: {<field>: ...}}"
- << " not {" << outerModElem.toString() << "}");
+ << " not {"
+ << outerModElem.toString()
+ << "}");
}
// Check whether there are indeed mods under this modifier.
@@ -120,7 +122,9 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << outerModElem.fieldName()
<< "' is empty. You must specify a field like so: "
- "{" << outerModElem.fieldName() << ": {<field>: ...}}");
+ "{"
+ << outerModElem.fieldName()
+ << ": {<field>: ...}}");
}
BSONObjIterator innerIter(outerModElem.embeddedObject());
@@ -146,7 +150,9 @@ inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
if (elem.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << elem.fieldName() << "' has no value in : " << elem
- << " which is not allowed for any $" << type << " mod.");
+ << " which is not allowed for any $"
+ << type
+ << " mod.");
}
unique_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(type));
@@ -275,7 +281,8 @@ Status UpdateDriver::update(StringData matchedField,
if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
return Status(ErrorCodes::ConflictingUpdateOperators,
str::stream() << "Cannot update '" << other->dottedField()
- << "' and '" << execInfo.fieldRef[i]->dottedField()
+ << "' and '"
+ << execInfo.fieldRef[i]->dottedField()
<< "' at the same time");
}
@@ -371,7 +378,8 @@ BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const
} else {
uassert(16980,
str::stream() << "Multi-update operations require all documents to "
- "have an '_id' field. " << doc.toString(false, false),
+ "have an '_id' field. "
+ << doc.toString(false, false),
!multi);
return doc;
}
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index a5202948963..8f714519ea1 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -30,10 +30,10 @@
#include "mongo/db/ops/update_lifecycle_impl.h"
-#include "mongo/db/client.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/client.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index 731c1195606..f6aa0e31d10 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/explain.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/ops/update_result.h b/src/mongo/db/ops/update_result.h
index 9c1c27c5a93..2c3107e3ea1 100644
--- a/src/mongo/db/ops/update_result.h
+++ b/src/mongo/db/ops/update_result.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 82e712b39ab..5371736b7bd 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -209,8 +209,8 @@ bool handleError(OperationContext* txn,
<< demangleName(typeid(ex)));
}
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, wholeOp.ns, staleConfigException->getVersionReceived());
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
return false;
@@ -230,7 +230,8 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"ns\" field of index description to be a "
"string, "
- "but found a " << typeName(nsElement.type()),
+ "but found a "
+ << typeName(nsElement.type()),
nsElement.type() == String);
const NamespaceString ns(nsElement.valueStringData());
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index 1eb3e8693d2..a4ade410010 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -50,22 +50,31 @@ void checkTypeInArray(BSONType expectedType,
const BSONElement& arrayElem) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for " << arrayElem.fieldNameStringData() << '['
- << elem.fieldNameStringData() << "]. Expected a "
- << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
+ << elem.fieldNameStringData()
+ << "]. Expected a "
+ << typeName(expectedType)
+ << ", got a "
+ << typeName(elem.type())
+ << '.',
elem.type() == expectedType);
}
void checkType(BSONType expectedType, const BSONElement& elem) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for '" << elem.fieldNameStringData() << "'. Expected a "
- << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
+ << typeName(expectedType)
+ << ", got a "
+ << typeName(elem.type())
+ << '.',
elem.type() == expectedType);
}
void checkOpCountForCommand(size_t numOps) {
uassert(ErrorCodes::InvalidLength,
str::stream() << "Write batch sizes must be between 1 and " << kMaxWriteBatchSize
- << ". Got " << numOps << " operations.",
+ << ". Got "
+ << numOps
+ << " operations.",
numOps != 0 && numOps <= kMaxWriteBatchSize);
}
@@ -108,7 +117,8 @@ void parseWriteCommand(StringData dbName,
"writeConcern", "maxTimeMS", "shardVersion"};
uassert(ErrorCodes::FailedToParse,
str::stream() << "Unknown option to " << cmd.firstElementFieldName()
- << " command: " << fieldName,
+ << " command: "
+ << fieldName,
std::find(ignoredFields.begin(), ignoredFields.end(), fieldName) !=
ignoredFields.end());
}
@@ -116,7 +126,8 @@ void parseWriteCommand(StringData dbName,
uassert(ErrorCodes::FailedToParse,
str::stream() << "The " << uniqueFieldName << " option is required to the "
- << cmd.firstElementFieldName() << " command.",
+ << cmd.firstElementFieldName()
+ << " command.",
haveUniqueField);
}
}
diff --git a/src/mongo/db/ops/write_ops_parsers.h b/src/mongo/db/ops/write_ops_parsers.h
index 526703fadd3..376fac81874 100644
--- a/src/mongo/db/ops/write_ops_parsers.h
+++ b/src/mongo/db/ops/write_ops_parsers.h
@@ -29,8 +29,8 @@
#pragma once
#include "mongo/db/jsobj.h"
-#include "mongo/util/net/message.h"
#include "mongo/db/ops/write_ops.h"
+#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index 252479b4915..78eb0212609 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -39,7 +39,9 @@ TEST(CommandWriteOpsParsers, CommonFields_BypassDocumentValidation) {
for (BSONElement bypassDocumentValidation : BSON_ARRAY(true << false << 1 << 0 << 1.0 << 0.0)) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "bypassDocumentValidation"
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "bypassDocumentValidation"
<< bypassDocumentValidation);
auto op = parseInsertCommand("foo", cmd);
ASSERT_EQ(op.bypassDocumentValidation, shouldBypassDocumentValidationForCommand(cmd));
@@ -50,7 +52,10 @@ TEST(CommandWriteOpsParsers, CommonFields_Ordered) {
for (bool ordered : {true, false}) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "ordered" << ordered);
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "ordered"
+ << ordered);
auto op = parseInsertCommand("foo", cmd);
ASSERT_EQ(op.continueOnError, !ordered);
}
@@ -60,45 +65,55 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
// These flags are ignored, so there is nothing to check other than that this doesn't throw.
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "maxTimeMS" << 1000 << "shardVersion"
- << BSONObj() << "writeConcern" << BSONObj());
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "maxTimeMS"
+ << 1000
+ << "shardVersion"
+ << BSONObj()
+ << "writeConcern"
+ << BSONObj());
parseInsertCommand("foo", cmd);
}
TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "GARBAGE" << 1);
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "GARBAGE"
+ << 1);
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, GarbageFieldsInUpdateDoc) {
- auto cmd =
- BSON("update"
- << "bar"
- << "updates" << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "GARBAGE" << 1));
+ auto cmd = BSON("update"
+ << "bar"
+ << "updates"
+ << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "GARBAGE" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, GarbageFieldsInDeleteDoc) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1));
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1));
}
TEST(CommandWriteOpsParsers, BadCollationFieldInUpdateDoc) {
- auto cmd =
- BSON("update"
- << "bar"
- << "updates" << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "collation" << 1));
+ auto cmd = BSON("update"
+ << "bar"
+ << "updates"
+ << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "collation" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, BadCollationFieldInDeleteDoc) {
- auto cmd =
- BSON("delete"
- << "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "collation" << 1));
+ auto cmd = BSON("delete"
+ << "bar"
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "collation" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
@@ -144,8 +159,11 @@ TEST(CommandWriteOpsParsers, Update) {
for (bool multi : {false, true}) {
auto cmd = BSON("update" << ns.coll() << "updates"
<< BSON_ARRAY(BSON("q" << query << "u" << update << "collation"
- << collation << "upsert" << upsert
- << "multi" << multi)));
+ << collation
+ << "upsert"
+ << upsert
+ << "multi"
+ << multi)));
auto op = parseUpdateCommand(ns.db(), cmd);
ASSERT_EQ(op.ns.ns(), ns.ns());
ASSERT(!op.bypassDocumentValidation);
@@ -166,9 +184,10 @@ TEST(CommandWriteOpsParsers, Remove) {
const BSONObj collation = BSON("locale"
<< "en_US");
for (bool multi : {false, true}) {
- auto cmd = BSON("delete" << ns.coll() << "deletes"
- << BSON_ARRAY(BSON("q" << query << "collation" << collation
- << "limit" << (multi ? 0 : 1))));
+ auto cmd =
+ BSON("delete" << ns.coll() << "deletes"
+ << BSON_ARRAY(BSON("q" << query << "collation" << collation << "limit"
+ << (multi ? 0 : 1))));
auto op = parseDeleteCommand(ns.db(), cmd);
ASSERT_EQ(op.ns.ns(), ns.ns());
ASSERT(!op.bypassDocumentValidation);
@@ -185,7 +204,8 @@ TEST(CommandWriteOpsParsers, RemoveErrorsWithBadLimit) {
for (BSONElement limit : BSON_ARRAY(-1 << 2 << 0.5)) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << limit));
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << limit));
ASSERT_THROWS_CODE(
parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
diff --git a/src/mongo/db/pipeline/accumulator.cpp b/src/mongo/db/pipeline/accumulator.cpp
index f5b21798f5a..b2fc55a455e 100644
--- a/src/mongo/db/pipeline/accumulator.cpp
+++ b/src/mongo/db/pipeline/accumulator.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/pipeline/accumulator.h"
#include "mongo/db/pipeline/value.h"
-#include "mongo/util/string_map.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/string_map.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index afcce43669e..7dd604b82b4 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -56,7 +56,7 @@ namespace mongo {
class Accumulator : public RefCountable {
public:
- using Factory = boost::intrusive_ptr<Accumulator>(*)();
+ using Factory = boost::intrusive_ptr<Accumulator> (*)();
Accumulator() = default;
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 78f4dac02da..3ad0d1ee577 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -30,11 +30,11 @@
#include <third_party/murmurhash3/MurmurHash3.h>
-#include <boost/intrusive_ptr.hpp>
#include <bitset>
+#include <boost/intrusive_ptr.hpp>
-#include "mongo/util/intrusive_counter.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/util/intrusive_counter.h"
namespace mongo {
/** Helper class to make the position in a document abstract
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index a2a534e7aca..6fc931c09f4 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -44,11 +44,11 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/pipeline/accumulator.h"
-#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/db/pipeline/dependencies.h"
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/pipeline/value.h"
#include "mongo/db/sorter/sorter.h"
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 183ea58fda7..68cf11ba20e 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -125,10 +125,10 @@ void DocumentSourceCursor::loadBatch() {
<< WorkingSetCommon::toStatusString(obj),
state != PlanExecutor::DEAD);
- uassert(
- 17285,
- str::stream() << "cursor encountered an error: " << WorkingSetCommon::toStatusString(obj),
- state != PlanExecutor::FAILURE);
+ uassert(17285,
+ str::stream() << "cursor encountered an error: "
+ << WorkingSetCommon::toStatusString(obj),
+ state != PlanExecutor::FAILURE);
massert(17286,
str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index da30c2adaa3..35133d6a400 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/document.h"
+#include "mongo/db/pipeline/document_source.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index f2821b9107c..3c5fccf47bd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -358,9 +358,12 @@ void DocumentSourceGraphLookUp::checkMemoryUsage() {
void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.getPath(false)
- << "connectToField" << _connectToField.getPath(false)
- << "connectFromField" << _connectFromField.getPath(false)
- << "startWith" << _startWith->serialize(false)));
+ << "connectToField"
+ << _connectToField.getPath(false)
+ << "connectFromField"
+ << _connectFromField.getPath(false)
+ << "startWith"
+ << _startWith->serialize(false)));
// depthField is optional; serialize it if it was specified.
if (_depthField) {
@@ -376,7 +379,8 @@ void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool
const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
spec["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << (*_unwind)->preserveNullAndEmptyArrays()
+ << "includeArrayIndex"
<< (indexPath ? Value((*indexPath).getPath(false)) : Value())));
}
@@ -432,14 +436,14 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
<< typeName(argument.type()),
argument.isNumber());
maxDepth = argument.safeNumberLong();
- uassert(
- 40101,
- str::stream() << "maxDepth requires a nonnegative argument, found: " << *maxDepth,
- *maxDepth >= 0);
- uassert(
- 40102,
- str::stream() << "maxDepth could not be represented as a long long: " << *maxDepth,
- *maxDepth == argument.number());
+ uassert(40101,
+ str::stream() << "maxDepth requires a nonnegative argument, found: "
+ << *maxDepth,
+ *maxDepth >= 0);
+ uassert(40102,
+ str::stream() << "maxDepth could not be represented as a long long: "
+ << *maxDepth,
+ *maxDepth == argument.number());
continue;
}
@@ -447,8 +451,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
argName == "depthField" || argName == "connectToField") {
// All remaining arguments to $graphLookup are expected to be strings.
uassert(40103,
- str::stream() << "expected string as argument for " << argName
- << ", found: " << argument.toString(false, false),
+ str::stream() << "expected string as argument for " << argName << ", found: "
+ << argument.toString(false, false),
argument.type() == String);
}
@@ -464,8 +468,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
depthField = boost::optional<FieldPath>(FieldPath(argument.String()));
} else {
uasserted(40104,
- str::stream()
- << "Unknown argument to $graphLookup: " << argument.fieldName());
+ str::stream() << "Unknown argument to $graphLookup: "
+ << argument.fieldName());
}
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index c4e6b72947a..277ea77d0fc 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -93,9 +93,9 @@ boost::optional<Document> DocumentSourceLookUp::getNext() {
if (!_additionalFilter && _matchSrc) {
// We have internalized a $match, but have not yet computed the descended $match that should
// be applied to our queries.
- _additionalFilter = DocumentSourceMatch::descendMatchOnPath(_matchSrc->getMatchExpression(),
- _as.getPath(false),
- pExpCtx)->getQuery();
+ _additionalFilter = DocumentSourceMatch::descendMatchOnPath(
+ _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)
+ ->getQuery();
}
if (_handlingUnwind) {
@@ -120,7 +120,8 @@ boost::optional<Document> DocumentSourceLookUp::getNext() {
objsize += result.objsize();
uassert(4568,
str::stream() << "Total size of documents in " << _fromNs.coll() << " matching "
- << query << " exceeds maximum document size",
+ << query
+ << " exceeds maximum document size",
objsize <= BSONObjMaxInternalSize);
results.push_back(Value(result));
}
@@ -224,23 +225,23 @@ Pipeline::SourceContainer::iterator DocumentSourceLookUp::optimizeAt(
}
bool isMatchOnlyOnAs = true;
- auto computeWhetherMatchOnAs =
- [&isMatchOnlyOnAs, &outputPath](MatchExpression* expression, std::string path) -> void {
- // If 'expression' is the child of a $elemMatch, we cannot internalize the $match. For
- // example, {b: {$elemMatch: {$gt: 1, $lt: 4}}}, where "b" is our "_as" field. This is
- // because there's no way to modify the expression to be a match just on 'b'--we cannot
- // change the path to an empty string, or remove the node entirely.
- if (expression->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
- expression->matchType() == MatchExpression::ELEM_MATCH_OBJECT) {
- isMatchOnlyOnAs = false;
- }
- if (expression->numChildren() == 0) {
- // 'expression' is a leaf node; examine the path. It is important that 'outputPath'
- // not equal 'path', because we cannot change the expression {b: {$eq: 3}}, where
- // 'path' is 'b', to be a match on a subfield, since no subfield exists.
- isMatchOnlyOnAs = isMatchOnlyOnAs && expression::isPathPrefixOf(outputPath, path);
- }
- };
+ auto computeWhetherMatchOnAs = [&isMatchOnlyOnAs, &outputPath](MatchExpression* expression,
+ std::string path) -> void {
+ // If 'expression' is the child of a $elemMatch, we cannot internalize the $match. For
+ // example, {b: {$elemMatch: {$gt: 1, $lt: 4}}}, where "b" is our "_as" field. This is
+ // because there's no way to modify the expression to be a match just on 'b'--we cannot
+ // change the path to an empty string, or remove the node entirely.
+ if (expression->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
+ expression->matchType() == MatchExpression::ELEM_MATCH_OBJECT) {
+ isMatchOnlyOnAs = false;
+ }
+ if (expression->numChildren() == 0) {
+ // 'expression' is a leaf node; examine the path. It is important that 'outputPath'
+ // not equal 'path', because we cannot change the expression {b: {$eq: 3}}, where
+ // 'path' is 'b', to be a match on a subfield, since no subfield exists.
+ isMatchOnlyOnAs = isMatchOnlyOnAs && expression::isPathPrefixOf(outputPath, path);
+ }
+ };
expression::mapOver(dependent->getMatchExpression(), computeWhetherMatchOnAs);
@@ -375,23 +376,27 @@ boost::optional<Document> DocumentSourceLookUp::unwindResult() {
void DocumentSourceLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
MutableDocument output(
DOC(getSourceName() << DOC("from" << _fromNs.coll() << "as" << _as.getPath(false)
- << "localField" << _localField.getPath(false)
- << "foreignField" << _foreignField.getPath(false))));
+ << "localField"
+ << _localField.getPath(false)
+ << "foreignField"
+ << _foreignField.getPath(false))));
if (explain) {
if (_handlingUnwind) {
const boost::optional<FieldPath> indexPath = _unwindSrc->indexPath();
output[getSourceName()]["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << _unwindSrc->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << _unwindSrc->preserveNullAndEmptyArrays()
+ << "includeArrayIndex"
<< (indexPath ? Value(indexPath->getPath(false)) : Value())));
}
if (_matchSrc) {
// Our output does not have to be parseable, so include a "matching" field with the
// descended match expression.
- output[getSourceName()]["matching"] = Value(
- DocumentSourceMatch::descendMatchOnPath(
- _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)->getQuery());
+ output[getSourceName()]["matching"] =
+ Value(DocumentSourceMatch::descendMatchOnPath(
+ _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)
+ ->getQuery());
}
array.push_back(Value(output.freeze()));
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 21f7dab580c..b754caa3af9 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -404,31 +404,29 @@ boost::intrusive_ptr<DocumentSourceMatch> DocumentSourceMatch::descendMatchOnPat
MatchExpression* matchExpr,
const std::string& descendOn,
intrusive_ptr<ExpressionContext> expCtx) {
- expression::mapOver(matchExpr,
- [&descendOn](MatchExpression* node, std::string path) -> void {
- // Cannot call this method on a $match including a $elemMatch.
- invariant(node->matchType() != MatchExpression::ELEM_MATCH_OBJECT &&
- node->matchType() != MatchExpression::ELEM_MATCH_VALUE);
- // Logical nodes do not have a path, but both 'leaf' and 'array' nodes
- // do.
- if (node->isLogical()) {
- return;
- }
-
- auto leafPath = node->path();
- invariant(expression::isPathPrefixOf(descendOn, leafPath));
-
- auto newPath = leafPath.substr(descendOn.size() + 1);
- if (node->isLeaf() &&
- node->matchType() != MatchExpression::TYPE_OPERATOR &&
- node->matchType() != MatchExpression::WHERE) {
- auto leafNode = static_cast<LeafMatchExpression*>(node);
- leafNode->setPath(newPath);
- } else if (node->isArray()) {
- auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
- arrayNode->setPath(newPath);
- }
- });
+ expression::mapOver(matchExpr, [&descendOn](MatchExpression* node, std::string path) -> void {
+ // Cannot call this method on a $match including a $elemMatch.
+ invariant(node->matchType() != MatchExpression::ELEM_MATCH_OBJECT &&
+ node->matchType() != MatchExpression::ELEM_MATCH_VALUE);
+ // Logical nodes do not have a path, but both 'leaf' and 'array' nodes
+ // do.
+ if (node->isLogical()) {
+ return;
+ }
+
+ auto leafPath = node->path();
+ invariant(expression::isPathPrefixOf(descendOn, leafPath));
+
+ auto newPath = leafPath.substr(descendOn.size() + 1);
+ if (node->isLeaf() && node->matchType() != MatchExpression::TYPE_OPERATOR &&
+ node->matchType() != MatchExpression::WHERE) {
+ auto leafNode = static_cast<LeafMatchExpression*>(node);
+ leafNode->setPath(newPath);
+ } else if (node->isArray()) {
+ auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
+ arrayNode->setPath(newPath);
+ }
+ });
BSONObjBuilder query;
matchExpr->serialize(&query);
@@ -479,15 +477,13 @@ DocumentSource::GetDepsReturn DocumentSourceMatch::getDependencies(DepsTracker*
}
void DocumentSourceMatch::addDependencies(DepsTracker* deps) const {
- expression::mapOver(_expression.get(),
- [deps](MatchExpression* node, std::string path) -> void {
- if (!path.empty() &&
- (node->numChildren() == 0 ||
- node->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
- node->matchType() == MatchExpression::ELEM_MATCH_OBJECT)) {
- deps->fields.insert(path);
- }
- });
+ expression::mapOver(_expression.get(), [deps](MatchExpression* node, std::string path) -> void {
+ if (!path.empty() &&
+ (node->numChildren() == 0 || node->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
+ node->matchType() == MatchExpression::ELEM_MATCH_OBJECT)) {
+ deps->fields.insert(path);
+ }
+ });
}
DocumentSourceMatch::DocumentSourceMatch(const BSONObj& query,
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index 6056a2f7646..01f11cb0c9f 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -84,9 +84,11 @@ intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
Value DocumentSourceMergeCursors::serialize(bool explain) const {
vector<Value> cursors;
for (size_t i = 0; i < _cursorDescriptors.size(); i++) {
- cursors.push_back(Value(
- DOC("host" << Value(_cursorDescriptors[i].connectionString.toString()) << "ns"
- << _cursorDescriptors[i].ns << "id" << _cursorDescriptors[i].cursorId)));
+ cursors.push_back(
+ Value(DOC("host" << Value(_cursorDescriptors[i].connectionString.toString()) << "ns"
+ << _cursorDescriptors[i].ns
+ << "id"
+ << _cursorDescriptors[i].cursorId)));
}
return Value(DOC(getSourceName() << Value(cursors)));
}
@@ -137,7 +139,8 @@ Document DocumentSourceMergeCursors::nextSafeFrom(DBClientCursor* cursor) {
const int code = next.hasField("code") ? next["code"].numberInt() : 17029;
uasserted(code,
str::stream() << "Received error in response from " << cursor->originalHost()
- << ": " << next);
+ << ": "
+ << next);
}
return Document::fromBsonWithMetaData(next);
}
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 148f7f4fc7e..252d894ba04 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -87,7 +87,8 @@ void DocumentSourceOut::prepTempCollection() {
bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
uassert(16994,
str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
- << "': " << info.toString(),
+ << "': "
+ << info.toString(),
ok);
}
@@ -103,7 +104,10 @@ void DocumentSourceOut::prepTempCollection() {
BSONObj err = conn->getLastErrorDetailed();
uassert(16995,
str::stream() << "copying index for $out failed."
- << " index: " << indexBson << " error: " << err,
+ << " index: "
+ << indexBson
+ << " error: "
+ << err,
DBClientWithCommands::getLastErrorString(err).empty());
}
}
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index b1d2c9e54f2..38f1e9fa8b8 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -150,7 +150,8 @@ boost::optional<Document> DocumentSourceRedact::redactObject() {
uasserted(17053,
str::stream() << "$redact's expression should not return anything "
<< "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned " << expressionResult.toString());
+ << "$$PRUNE, but returned "
+ << expressionResult.toString());
}
}
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index 802a5d05aab..dafae4ed111 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -106,9 +106,12 @@ boost::optional<Document> DocumentSourceSampleFromRandomCursor::getNextNonDuplic
uassert(
28793,
str::stream()
- << "The optimized $sample stage requires all documents have a " << _idField
+ << "The optimized $sample stage requires all documents have a "
+ << _idField
<< " field in order to de-duplicate results, but encountered a document without a "
- << _idField << " field: " << (*doc).toString(),
+ << _idField
+ << " field: "
+ << (*doc).toString(),
!idField.missing());
if (_seenDocs.insert(std::move(idField)).second) {
@@ -118,8 +121,9 @@ boost::optional<Document> DocumentSourceSampleFromRandomCursor::getNextNonDuplic
}
uasserted(28799,
str::stream() << "$sample stage could not find a non-duplicate document after "
- << kMaxAttempts << " while using a random cursor. This is likely a "
- "sporadic failure, please try again.");
+ << kMaxAttempts
+ << " while using a random cursor. This is likely a "
+ "sporadic failure, please try again.");
}
Value DocumentSourceSampleFromRandomCursor::serialize(bool explain) const {
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index f2e5dca0347..c9c21043497 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -76,7 +76,8 @@ void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) co
array.push_back(
Value(DOC(getSourceName()
<< DOC("sortKey" << serializeSortKey(explain) << "mergePresorted"
- << (_mergingPresorted ? Value(true) : Value()) << "limit"
+ << (_mergingPresorted ? Value(true) : Value())
+ << "limit"
<< (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
} else { // one Value for $sort and maybe a Value for $limit
MutableDocument inner(serializeSortKey(explain));
diff --git a/src/mongo/db/pipeline/document_source_test.cpp b/src/mongo/db/pipeline/document_source_test.cpp
index e0c6782883e..b5ed007d1f9 100644
--- a/src/mongo/db/pipeline/document_source_test.cpp
+++ b/src/mongo/db/pipeline/document_source_test.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/stdx/memory.h"
+#include "mongo/unittest/temp_dir.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/tick_source_mock.h"
-#include "mongo/unittest/temp_dir.h"
namespace mongo {
bool isMongos() {
@@ -117,7 +117,7 @@ TEST(TruncateSort, TruncateSortDedupsSortCorrectly) {
}
template <size_t ArrayLen>
-set<string> arrayToSet(const char*(&array)[ArrayLen]) {
+set<string> arrayToSet(const char* (&array)[ArrayLen]) {
set<string> out;
for (size_t i = 0; i < ArrayLen; i++)
out.insert(array[i]);
@@ -837,8 +837,9 @@ class TwoValuesTwoKeys : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$_id"
- << "a" << BSON("$push"
- << "$a"));
+ << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1]},{_id:1,a:[2]}]";
@@ -856,8 +857,9 @@ class FourValuesTwoKeys : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$id"
- << "a" << BSON("$push"
- << "$a"));
+ << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1,3]},{_id:1,a:[2,4]}]";
@@ -875,8 +877,10 @@ class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$id"
- << "list" << BSON("$push"
- << "$a") << "sum"
+ << "list"
+ << BSON("$push"
+ << "$a")
+ << "sum"
<< BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
}
virtual string expectedResultSetString() {
@@ -892,8 +896,9 @@ class GroupNullUndefinedIds : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$a"
- << "sum" << BSON("$sum"
- << "$b"));
+ << "sum"
+ << BSON("$sum"
+ << "$b"));
}
virtual string expectedResultSetString() {
return "[{_id:null,sum:110}]";
@@ -957,8 +962,9 @@ public:
// Create a group source.
createGroup(BSON("_id"
<< "$x"
- << "list" << BSON("$push"
- << "$y")));
+ << "list"
+ << BSON("$push"
+ << "$y")));
// Create a merger version of the source.
intrusive_ptr<DocumentSource> group = createMerger();
// Attach the merger to the synthetic shard results.
@@ -2348,7 +2354,8 @@ private:
void createUnwind(bool preserveNullAndEmptyArrays, bool includeArrayIndex) {
auto specObj =
DOC("$unwind" << DOC("path" << unwindFieldPath() << "preserveNullAndEmptyArrays"
- << preserveNullAndEmptyArrays << "includeArrayIndex"
+ << preserveNullAndEmptyArrays
+ << "includeArrayIndex"
<< (includeArrayIndex ? Value(indexPath()) : Value())));
_unwind = static_cast<DocumentSourceUnwind*>(
DocumentSourceUnwind::createFromBson(specObj.toBson().firstElement(), ctx()).get());
@@ -2396,11 +2403,12 @@ private:
}
BSONObj expectedSerialization(bool preserveNullAndEmptyArrays, bool includeArrayIndex) const {
- return DOC("$unwind" << DOC(
- "path" << Value(unwindFieldPath()) << "preserveNullAndEmptyArrays"
- << (preserveNullAndEmptyArrays ? Value(true) : Value())
- << "includeArrayIndex"
- << (includeArrayIndex ? Value(indexPath()) : Value()))).toBson();
+ return DOC("$unwind" << DOC("path" << Value(unwindFieldPath())
+ << "preserveNullAndEmptyArrays"
+ << (preserveNullAndEmptyArrays ? Value(true) : Value())
+ << "includeArrayIndex"
+ << (includeArrayIndex ? Value(indexPath()) : Value())))
+ .toBson();
}
/** Assert that iterator state accessors consistently report the source is exhausted. */
@@ -2911,7 +2919,8 @@ TEST_F(InvalidUnwindSpec, NonDollarPrefixedPath) {
TEST_F(InvalidUnwindSpec, NonBoolPreserveNullAndEmptyArrays) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays" << 2))),
+ << "preserveNullAndEmptyArrays"
+ << 2))),
UserException,
28809);
}
@@ -2919,7 +2928,8 @@ TEST_F(InvalidUnwindSpec, NonBoolPreserveNullAndEmptyArrays) {
TEST_F(InvalidUnwindSpec, NonStringIncludeArrayIndex) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "includeArrayIndex" << 2))),
+ << "includeArrayIndex"
+ << 2))),
UserException,
28810);
}
@@ -2951,13 +2961,16 @@ TEST_F(InvalidUnwindSpec, DollarPrefixedIncludeArrayIndex) {
TEST_F(InvalidUnwindSpec, UnrecognizedOption) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays" << true
- << "foo" << 3))),
+ << "preserveNullAndEmptyArrays"
+ << true
+ << "foo"
+ << 3))),
UserException,
28811);
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "foo" << 3))),
+ << "foo"
+ << 3))),
UserException,
28811);
}
@@ -3306,9 +3319,8 @@ public:
match1->optimizeAt(container.begin(), &container);
ASSERT_EQUALS(container.size(), 1U);
ASSERT_EQUALS(match1->getQuery(),
- fromjson(
- "{'$and': [{'$and': [{a:1}, {b:1}]},"
- "{c:1}]}"));
+ fromjson("{'$and': [{'$and': [{a:1}, {b:1}]},"
+ "{c:1}]}"));
}
};
@@ -3367,7 +3379,8 @@ public:
<< "foreignField"
<< "c"
<< "as"
- << "d.e")).firstElement(),
+ << "d.e"))
+ .firstElement(),
ctx());
lookup->setSource(source.get());
@@ -3391,7 +3404,8 @@ public:
<< "foreignField"
<< "c"
<< "as"
- << "d")).firstElement(),
+ << "d"))
+ .firstElement(),
ctx());
lookup->setSource(source.get());
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index bba51298389..58a4d979146 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -309,7 +309,8 @@ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
indexPath = subElem.String();
uassert(28822,
str::stream() << "includeArrayIndex option to $unwind stage should not be "
- "prefixed with a '$': " << (*indexPath),
+ "prefixed with a '$': "
+ << (*indexPath),
(*indexPath)[0] != '$');
} else {
uasserted(28811,
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 8024ec39cf2..9c68924d476 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/pipeline/value.h"
-#include "mongo/util/string_map.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/platform/bits.h"
+#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/string_map.h"
namespace mongo {
using Parser = Expression::Parser;
@@ -85,7 +85,9 @@ void Variables::uassertValidNameForUserWrite(StringData varName) {
uassert(16868,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
+ << "for a variable name: '"
+ << varName[i]
+ << "'",
charIsValid);
}
}
@@ -110,7 +112,9 @@ void Variables::uassertValidNameForUserRead(StringData varName) {
uassert(16871,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
+ << "for a variable name: '"
+ << varName[i]
+ << "'",
charIsValid);
}
}
@@ -182,7 +186,8 @@ bool Expression::ObjectCtx::inclusionOk() const {
string Expression::removeFieldPrefix(const string& prefixedField) {
uassert(16419,
str::stream() << "field path must not contain embedded null characters"
- << prefixedField.find("\0") << ",",
+ << prefixedField.find("\0")
+ << ",",
prefixedField.find('\0') == string::npos);
const char* pPrefixedField = prefixedField.c_str();
@@ -220,7 +225,8 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
uassert(
15983,
str::stream() << "the operator must be the only field in a pipeline object (at '"
- << pFieldName << "'",
+ << pFieldName
+ << "'",
fieldCount == 0);
uassert(16404,
@@ -234,7 +240,9 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
} else {
uassert(15990,
str::stream() << "this object is already an operator expression, and can't be "
- "used as a document expression (at '" << pFieldName << "')",
+ "used as a document expression (at '"
+ << pFieldName
+ << "')",
kind != OPERATOR);
uassert(16405,
@@ -299,7 +307,9 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
default:
uassert(15992,
str::stream() << "disallowed field type " << typeName(fieldType)
- << " in object expression (at '" << fieldName << "')",
+ << " in object expression (at '"
+ << fieldName
+ << "')",
false);
}
}
@@ -637,11 +647,13 @@ Value ExpressionArrayElemAt::evaluateInternal(Variables* vars) const {
array.isArray());
uassert(28690,
str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is " << typeName(indexArg.getType()),
+ << " but is "
+ << typeName(indexArg.getType()),
indexArg.numeric());
uassert(28691,
str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: " << indexArg.coerceToDouble(),
+ << " a 32-bit integer: "
+ << indexArg.coerceToDouble(),
indexArg.integral());
long long i = indexArg.coerceToLong();
@@ -969,8 +981,8 @@ intrusive_ptr<Expression> ExpressionDateToString::parse(BSONElement expr,
dateElem = arg;
} else {
uasserted(18534,
- str::stream()
- << "Unrecognized argument to $dateToString: " << arg.fieldName());
+ str::stream() << "Unrecognized argument to $dateToString: "
+ << arg.fieldName());
}
}
@@ -1070,7 +1082,8 @@ string ExpressionDateToString::formatDate(const string& format,
const int year = ExpressionYear::extract(tm);
uassert(18537,
str::stream() << "$dateToString is only defined on year 0-9999,"
- << " tried to use year " << year,
+ << " tried to use year "
+ << year,
(year >= 0) && (year <= 9999));
insertPadded(formatted, year, 4);
break;
@@ -1201,7 +1214,9 @@ Value ExpressionDivide::evaluateInternal(Variables* vars) const {
} else {
uasserted(16609,
str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
+ << typeName(lhs.getType())
+ << " and "
+ << typeName(rhs.getType()));
}
}
@@ -1683,8 +1698,9 @@ intrusive_ptr<Expression> ExpressionFilter::optimize() {
}
Value ExpressionFilter::serialize(bool explain) const {
- return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
- << "cond" << _filter->serialize(explain))));
+ return Value(
+ DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName << "cond"
+ << _filter->serialize(explain))));
}
Value ExpressionFilter::evaluateInternal(Variables* vars) const {
@@ -2038,7 +2054,9 @@ Value ExpressionMod::evaluateInternal(Variables* vars) const {
} else {
uasserted(16611,
str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
+ << typeName(lhs.getType())
+ << " and "
+ << typeName(rhs.getType()));
}
}
@@ -2165,12 +2183,15 @@ void uassertIfNotIntegralAndNonNegative(Value val,
StringData argumentName) {
uassert(40096,
str::stream() << expressionName << "requires an integral " << argumentName
- << ", found a value of type: " << typeName(val.getType())
- << ", with value: " << val.toString(),
+ << ", found a value of type: "
+ << typeName(val.getType())
+ << ", with value: "
+ << val.toString(),
val.integral());
uassert(40097,
str::stream() << expressionName << " requires a nonnegative " << argumentName
- << ", found: " << val.toString(),
+ << ", found: "
+ << val.toString(),
val.coerceToInt() >= 0);
}
@@ -2796,7 +2817,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
startVal.numeric());
uassert(34444,
str::stream() << "$range requires a starting value that can be represented as a 32-bit "
- "integer, found value: " << startVal.toString(),
+ "integer, found value: "
+ << startVal.toString(),
startVal.integral());
uassert(34445,
str::stream() << "$range requires a numeric ending value, found value of type: "
@@ -2804,7 +2826,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
endVal.numeric());
uassert(34446,
str::stream() << "$range requires an ending value that can be represented as a 32-bit "
- "integer, found value: " << endVal.toString(),
+ "integer, found value: "
+ << endVal.toString(),
endVal.integral());
int current = startVal.coerceToInt();
@@ -2821,7 +2844,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
stepVal.numeric());
uassert(34448,
str::stream() << "$range requires a step value that can be represented as a 32-bit "
- "integer, found value: " << stepVal.toString(),
+ "integer, found value: "
+ << stepVal.toString(),
stepVal.integral());
step = stepVal.coerceToInt();
@@ -2984,11 +3008,13 @@ Value ExpressionSetDifference::evaluateInternal(Variables* vars) const {
uassert(17048,
str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
uassert(17049,
str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
ValueSet rhsSet = arrayToSet(rhs);
@@ -3026,7 +3052,8 @@ Value ExpressionSetEquals::evaluateInternal(Variables* vars) const {
const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
uassert(17044,
str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: " << typeName(nextEntry.getType()),
+ << "argument is of type: "
+ << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3058,7 +3085,8 @@ Value ExpressionSetIntersection::evaluateInternal(Variables* vars) const {
}
uassert(17047,
str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: " << typeName(nextEntry.getType()),
+ << "argument is of type: "
+ << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3113,11 +3141,13 @@ Value ExpressionSetIsSubset::evaluateInternal(Variables* vars) const {
uassert(17046,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
uassert(17042,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
return setIsSubsetHelper(lhs.getArray(), arrayToSet(rhs));
@@ -3142,7 +3172,8 @@ public:
uassert(17310,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
@@ -3164,7 +3195,8 @@ intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
const Value rhs = ec->getValue();
uassert(17311,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
return new Optimized(arrayToSet(rhs), vpOperand);
@@ -3189,7 +3221,8 @@ Value ExpressionSetUnion::evaluateInternal(Variables* vars) const {
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: " << typeName(newEntries.getType()),
+ << " is of type: "
+ << typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
@@ -3229,15 +3262,18 @@ Value ExpressionSlice::evaluateInternal(Variables* vars) const {
uassert(28724,
str::stream() << "First argument to $slice must be an array, but is"
- << " of type: " << typeName(arrayVal.getType()),
+ << " of type: "
+ << typeName(arrayVal.getType()),
arrayVal.isArray());
uassert(28725,
str::stream() << "Second argument to $slice must be a numeric value,"
- << " but is of type: " << typeName(arg2.getType()),
+ << " but is of type: "
+ << typeName(arg2.getType()),
arg2.numeric());
uassert(28726,
str::stream() << "Second argument to $slice can't be represented as"
- << " a 32-bit integer: " << arg2.coerceToDouble(),
+ << " a 32-bit integer: "
+ << arg2.coerceToDouble(),
arg2.integral());
const auto& array = arrayVal.getArray();
@@ -3277,11 +3313,13 @@ Value ExpressionSlice::evaluateInternal(Variables* vars) const {
uassert(28727,
str::stream() << "Third argument to $slice must be numeric, but "
- << "is of type: " << typeName(countVal.getType()),
+ << "is of type: "
+ << typeName(countVal.getType()),
countVal.numeric());
uassert(28728,
str::stream() << "Third argument to $slice can't be represented"
- << " as a 32-bit integer: " << countVal.coerceToDouble(),
+ << " as a 32-bit integer: "
+ << countVal.coerceToDouble(),
countVal.integral());
uassert(28729,
str::stream() << "Third argument to $slice must be positive: "
@@ -3329,11 +3367,13 @@ Value ExpressionSplit::evaluateInternal(Variables* vars) const {
uassert(40085,
str::stream() << "$split requires an expression that evaluates to a string as a first "
- "argument, found: " << typeName(inputArg.getType()),
+ "argument, found: "
+ << typeName(inputArg.getType()),
inputArg.getType() == BSONType::String);
uassert(40086,
str::stream() << "$split requires an expression that evaluates to a string as a second "
- "argument, found: " << typeName(separatorArg.getType()),
+ "argument, found: "
+ << typeName(separatorArg.getType()),
separatorArg.getType() == BSONType::String);
std::string input = inputArg.getString();
@@ -3421,12 +3461,14 @@ Value ExpressionSubstrBytes::evaluateInternal(Variables* vars) const {
uassert(16034,
str::stream() << getOpName()
<< ": starting index must be a numeric type (is BSON type "
- << typeName(pLower.getType()) << ")",
+ << typeName(pLower.getType())
+ << ")",
(pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
pLower.getType() == NumberDouble));
uassert(16035,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(pLength.getType()) << ")",
+ << typeName(pLength.getType())
+ << ")",
(pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
pLength.getType() == NumberDouble));
@@ -3471,7 +3513,8 @@ Value ExpressionSubstrCP::evaluateInternal(Variables* vars) const {
std::string str = inputVal.coerceToString();
uassert(34450,
str::stream() << getOpName() << ": starting index must be a numeric type (is BSON type "
- << typeName(lowerVal.getType()) << ")",
+ << typeName(lowerVal.getType())
+ << ")",
lowerVal.numeric());
uassert(34451,
str::stream() << getOpName()
@@ -3480,7 +3523,8 @@ Value ExpressionSubstrCP::evaluateInternal(Variables* vars) const {
lowerVal.integral());
uassert(34452,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(lengthVal.getType()) << ")",
+ << typeName(lengthVal.getType())
+ << ")",
lengthVal.numeric());
uassert(34453,
str::stream() << getOpName()
@@ -3562,10 +3606,10 @@ const char* ExpressionStrLenBytes::getOpName() const {
Value ExpressionStrLenCP::evaluateInternal(Variables* vars) const {
Value val(vpOperand[0]->evaluateInternal(vars));
- uassert(
- 34471,
- str::stream() << "$strLenCP requires a string argument, found: " << typeName(val.getType()),
- val.getType() == String);
+ uassert(34471,
+ str::stream() << "$strLenCP requires a string argument, found: "
+ << typeName(val.getType()),
+ val.getType() == String);
std::string stringVal = val.getString();
@@ -4076,10 +4120,10 @@ Value ExpressionZip::evaluateInternal(Variables* vars) const {
return Value(BSONNULL);
}
- uassert(
- 34468,
- str::stream() << "$zip found a non-array expression in input: " << evalExpr.toString(),
- evalExpr.isArray());
+ uassert(34468,
+ str::stream() << "$zip found a non-array expression in input: "
+ << evalExpr.toString(),
+ evalExpr.isArray());
inputValues.push_back(evalExpr.getArray());
@@ -4136,14 +4180,16 @@ boost::intrusive_ptr<Expression> ExpressionZip::optimize() {
std::transform(_inputs.begin(),
_inputs.end(),
_inputs.begin(),
- [](intrusive_ptr<Expression> inputExpression)
- -> intrusive_ptr<Expression> { return inputExpression->optimize(); });
+ [](intrusive_ptr<Expression> inputExpression) -> intrusive_ptr<Expression> {
+ return inputExpression->optimize();
+ });
std::transform(_defaults.begin(),
_defaults.end(),
_defaults.begin(),
- [](intrusive_ptr<Expression> defaultExpression)
- -> intrusive_ptr<Expression> { return defaultExpression->optimize(); });
+ [](intrusive_ptr<Expression> defaultExpression) -> intrusive_ptr<Expression> {
+ return defaultExpression->optimize();
+ });
return this;
}
@@ -4162,19 +4208,21 @@ Value ExpressionZip::serialize(bool explain) const {
}
return Value(DOC("$zip" << DOC("inputs" << Value(serializedInput) << "defaults"
- << Value(serializedDefaults) << "useLongestLength"
+ << Value(serializedDefaults)
+ << "useLongestLength"
<< serializedUseLongestLength)));
}
void ExpressionZip::addDependencies(DepsTracker* deps, std::vector<std::string>* path) const {
- std::for_each(_inputs.begin(),
- _inputs.end(),
- [&deps](intrusive_ptr<Expression> inputExpression)
- -> void { inputExpression->addDependencies(deps); });
+ std::for_each(
+ _inputs.begin(), _inputs.end(), [&deps](intrusive_ptr<Expression> inputExpression) -> void {
+ inputExpression->addDependencies(deps);
+ });
std::for_each(_defaults.begin(),
_defaults.end(),
- [&deps](intrusive_ptr<Expression> defaultExpression)
- -> void { defaultExpression->addDependencies(deps); });
+ [&deps](intrusive_ptr<Expression> defaultExpression) -> void {
+ defaultExpression->addDependencies(deps);
+ });
}
const char* ExpressionZip::getOpName() const {
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 87a9536e689..8ed6f95f6eb 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -394,10 +394,14 @@ class ExpressionRangedArity : public ExpressionNaryBase<SubClass> {
public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(28667,
- mongoutils::str::stream()
- << "Expression " << this->getOpName() << " takes at least " << MinArgs
- << " arguments, and at most " << MaxArgs << ", but " << args.size()
- << " were passed in.",
+ mongoutils::str::stream() << "Expression " << this->getOpName()
+ << " takes at least "
+ << MinArgs
+ << " arguments, and at most "
+ << MaxArgs
+ << ", but "
+ << args.size()
+ << " were passed in.",
MinArgs <= args.size() && args.size() <= MaxArgs);
}
};
@@ -409,7 +413,9 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(16020,
mongoutils::str::stream() << "Expression " << this->getOpName() << " takes exactly "
- << NArgs << " arguments. " << args.size()
+ << NArgs
+ << " arguments. "
+ << args.size()
<< " were passed in.",
args.size() == NArgs);
}
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index d4b566e2bb4..2487bfc18f1 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -528,8 +528,8 @@ TEST_F(ExpressionNaryTest, FlattenInnerOperandsOptimizationOnAssociativeOnlyMidd
intrusive_ptr<Expression> optimized = _associativeOnly->optimize();
ASSERT(_associativeOnly == optimized);
- BSONArray expectedContent = BSON_ARRAY(200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1"
- << BSON_ARRAY(101 << 99) << "$path2");
+ BSONArray expectedContent = BSON_ARRAY(
+ 200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1" << BSON_ARRAY(101 << 99) << "$path2");
assertContents(_associativeOnly, expectedContent);
}
@@ -1368,7 +1368,8 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b" << 1));
+ << "$b"
+ << 1));
}
BSONObj expectedOptimized() {
return BSON("$and" << BSON_ARRAY("$a"
@@ -1380,7 +1381,8 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b" << 0));
+ << "$b"
+ << 0));
}
BSONObj expectedOptimized() {
return BSON("$const" << false);
@@ -2250,12 +2252,11 @@ public:
void run() {
intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b.c");
assertBinaryEqual(fromjson("{'':[[1,2],3,[4],[[5]],[6,7]]}"),
- toBson(expression->evaluate(fromBson(fromjson(
- "{a:[{b:[{c:1},{c:2}]},"
- "{b:{c:3}},"
- "{b:[{c:4}]},"
- "{b:[{c:[5]}]},"
- "{b:{c:[6,7]}}]}")))));
+ toBson(expression->evaluate(fromBson(fromjson("{a:[{b:[{c:1},{c:2}]},"
+ "{b:{c:3}},"
+ "{b:[{c:4}]},"
+ "{b:[{c:[5]}]},"
+ "{b:{c:[6,7]}}]}")))));
}
};
@@ -3386,7 +3387,8 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b" << 1));
+ << "$b"
+ << 1));
}
BSONObj expectedOptimized() {
return BSON("$const" << true);
@@ -3397,7 +3399,8 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b" << 0));
+ << "$b"
+ << 0));
}
BSONObj expectedOptimized() {
return BSON("$or" << BSON_ARRAY("$a"
@@ -3996,13 +3999,15 @@ public:
const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the
- // same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
+ ASSERT_THROWS(
+ {
+ // NOTE: parse and evaluatation failures are treated the
+ // same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ },
+ UserException);
}
}
}
@@ -4015,9 +4020,12 @@ class Same : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4025,9 +4033,12 @@ class Redundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4036,8 +4047,11 @@ class DoubleRedundant : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
- << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4045,9 +4059,12 @@ class Super : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2)));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << DOC_ARRAY(2)));
}
};
@@ -4055,9 +4072,12 @@ class SuperWithRedundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2)));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << DOC_ARRAY(2)));
}
};
@@ -4065,9 +4085,12 @@ class Sub : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4075,9 +4098,12 @@ class SameBackwards : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4085,9 +4111,12 @@ class NoOverlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << vector<Value>()
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1 << 2)));
+ << "$setIntersection"
+ << vector<Value>()
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference"
+ << DOC_ARRAY(1 << 2)));
}
};
@@ -4095,9 +4124,12 @@ class Overlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(2)
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1)));
+ << "$setIntersection"
+ << DOC_ARRAY(2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference"
+ << DOC_ARRAY(1)));
}
};
@@ -4105,7 +4137,9 @@ class LastNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL) << "error"
+ << "$setDifference"
+ << BSONNULL)
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"));
}
@@ -4115,7 +4149,9 @@ class FirstNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL) << "error"
+ << "$setDifference"
+ << BSONNULL)
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"));
}
@@ -4126,9 +4162,10 @@ class NoArg : public ExpectedResultBase {
return DOC(
"input" << vector<Value>() << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference"));
+ << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
}
};
@@ -4136,7 +4173,8 @@ class OneArg : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2) << "$setUnion"
- << DOC_ARRAY(1 << 2)) << "error"
+ << DOC_ARRAY(1 << 2))
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"
<< "$setDifference"));
@@ -4148,9 +4186,10 @@ class EmptyArg : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference"));
+ << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
}
};
@@ -4158,8 +4197,12 @@ class LeftArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
- << "$setEquals" << false << "$setDifference"
+ << DOC_ARRAY(1 << 2)
+ << "$setIsSubset"
+ << true
+ << "$setEquals"
+ << false
+ << "$setDifference"
<< vector<Value>()));
}
};
@@ -4168,8 +4211,12 @@ class RightArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
- << "$setEquals" << false << "$setDifference"
+ << DOC_ARRAY(1 << 2)
+ << "$setIsSubset"
+ << false
+ << "$setEquals"
+ << false
+ << "$setDifference"
<< DOC_ARRAY(1 << 2)));
}
};
@@ -4177,27 +4224,34 @@ class RightArgEmpty : public ExpectedResultBase {
class ManyArgs : public ExpectedResultBase {
Document getSpec() {
return DOC(
- "input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
- << DOC_ARRAY("asdf"
- << "foo") << DOC_ARRAY(80.3 << 34) << vector<Value>()
- << DOC_ARRAY(80.3 << "foo" << 11 << "yay")) << "expected"
- << DOC("$setIntersection"
- << vector<Value>() << "$setEquals" << false << "$setUnion"
- << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"
- << "foo"
- << "yay")) << "error" << DOC_ARRAY("$setIsSubset"
- << "$setDifference"));
+ "input" << DOC_ARRAY(DOC_ARRAY(8 << 3) << DOC_ARRAY("asdf"
+ << "foo")
+ << DOC_ARRAY(80.3 << 34)
+ << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo" << 11 << "yay"))
+ << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setEquals" << false
+ << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"
+ << "foo"
+ << "yay"))
+ << "error"
+ << DOC_ARRAY("$setIsSubset"
+ << "$setDifference"));
}
};
class ManyArgsEqual : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
- << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4)) << "expected"
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4) << DOC_ARRAY(1 << 2 << 2 << 4)
+ << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4))
+ << "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
- << true << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4)) << "error"
+ << true
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4))
+ << "error"
<< DOC_ARRAY("$setIsSubset"
<< "$setDifference"));
}
@@ -4757,13 +4811,15 @@ public:
const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the
- // same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
+ ASSERT_THROWS(
+ {
+ // NOTE: parse and evaluatation failures are treated the
+ // same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ },
+ UserException);
}
}
}
diff --git a/src/mongo/db/pipeline/field_path_test.cpp b/src/mongo/db/pipeline/field_path_test.cpp
index 63c0216a76d..92ba167f562 100644
--- a/src/mongo/db/pipeline/field_path_test.cpp
+++ b/src/mongo/db/pipeline/field_path_test.cpp
@@ -29,9 +29,9 @@
#include "mongo/platform/basic.h"
#include "mongo/db/pipeline/field_path.h"
+#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
-#include "mongo/dbtests/dbtests.h"
namespace mongo {
using std::string;
diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h
index a40ac28155b..3150d7bc1af 100644
--- a/src/mongo/db/pipeline/lookup_set_cache.h
+++ b/src/mongo/db/pipeline/lookup_set_cache.h
@@ -29,15 +29,15 @@
#include "mongo/platform/basic.h"
-#include <unordered_map>
-#include <unordered_set>
-#include <iostream>
#include <boost/intrusive_ptr.hpp>
-#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/sequenced_index.hpp>
+#include <boost/multi_index_container.hpp>
#include <boost/optional.hpp>
+#include <iostream>
+#include <unordered_map>
+#include <unordered_set>
#include "mongo/bson/bsonobj.h"
#include "mongo/db/pipeline/value.h"
diff --git a/src/mongo/db/pipeline/lookup_set_cache_test.cpp b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
index 4d5ec28ad56..2903a3632f5 100644
--- a/src/mongo/db/pipeline/lookup_set_cache_test.cpp
+++ b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
@@ -28,9 +28,9 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/bson/bsonobjbuilder.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 32285652e24..9f5d6e1fd67 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -499,7 +499,8 @@ void Pipeline::run(BSONObjBuilder& result) {
// object will be too large, assert. the extra 1KB is for headers
uassert(16389,
str::stream() << "aggregation result exceeds maximum document size ("
- << BSONObjMaxUserSize / (1024 * 1024) << "MB)",
+ << BSONObjMaxUserSize / (1024 * 1024)
+ << "MB)",
resultArray.len() < BSONObjMaxUserSize - 1024);
}
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 6e4c8c817a3..f30e513a2f4 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -37,13 +37,13 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_iterator.h"
#include "mongo/db/exec/multi_iterator.h"
#include "mongo/db/exec/shard_filter.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/pipeline/document_source.h"
@@ -51,11 +51,11 @@
#include "mongo/db/query/collation/collation_serializer.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner.h"
+#include "mongo/db/s/sharded_connection_info.h"
+#include "mongo/db/s/sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/sorted_data_interface.h"
-#include "mongo/db/s/sharded_connection_info.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/s/chunk_version.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
@@ -84,9 +84,8 @@ public:
bool isSharded(const NamespaceString& ns) final {
const ChunkVersion unsharded(0, 0, OID());
- return !(ShardingState::get(_ctx->opCtx)
- ->getVersion(ns.ns())
- .isWriteCompatibleWith(unsharded));
+ return !(
+ ShardingState::get(_ctx->opCtx)->getVersion(ns.ns()).isWriteCompatibleWith(unsharded));
}
bool isCapped(const NamespaceString& ns) final {
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 9d98f8d6f02..8a37c51067c 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/pipeline/value.h"
+#include <boost/functional/hash.hpp>
#include <cmath>
#include <limits>
-#include <boost/functional/hash.hpp>
#include "mongo/base/compare_numbers.h"
#include "mongo/base/data_type_endian.h"
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index 19b951f852c..c6d4b90c0cd 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -303,8 +303,7 @@ inline void swap(mongo::Value& lhs, mongo::Value& rhs) {
class ImplicitValue : public Value {
public:
template <typename T>
- ImplicitValue(T arg)
- : Value(std::move(arg)) {}
+ ImplicitValue(T arg) : Value(std::move(arg)) {}
};
}
diff --git a/src/mongo/db/pipeline/value_internal.h b/src/mongo/db/pipeline/value_internal.h
index 556eebec060..fe34b97e0a7 100644
--- a/src/mongo/db/pipeline/value_internal.h
+++ b/src/mongo/db/pipeline/value_internal.h
@@ -32,13 +32,13 @@
#include <boost/config.hpp>
#include <boost/intrusive_ptr.hpp>
+#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsontypes.h"
-#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/oid.h"
+#include "mongo/bson/timestamp.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/intrusive_counter.h"
-#include "mongo/bson/timestamp.h"
namespace mongo {
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 3dc4386f412..c5e54fedf31 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -112,12 +112,12 @@ TEST(CanonicalQueryTest, IsValidText) {
ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
// Valid: TEXT inside OR.
- ASSERT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- "]}",
- *lpq));
+ ASSERT_OK(
+ isValid("{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ "]}",
+ *lpq));
// Valid: TEXT outside NOR.
ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
@@ -126,37 +126,37 @@ TEST(CanonicalQueryTest, IsValidText) {
ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
// Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {a: 2}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$nor: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {a: 2}"
+ "]}",
+ *lpq));
// Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$text: {$search: 's'}},"
- " {$text: {$search: 't'}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$text: {$search: 's'}},"
+ " {$text: {$search: 't'}}"
+ "]}",
+ *lpq));
// Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {$or: ["
- " {$text: {$search: 't'}},"
- " {b: 1}"
- " ]}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {$or: ["
+ " {$text: {$search: 't'}},"
+ " {b: 1}"
+ " ]}"
+ "]}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextTailable) {
@@ -178,61 +178,61 @@ TEST(CanonicalQueryTest, IsValidGeo) {
ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
// Valid: GEO_NEAR inside nested AND.
- ASSERT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {c: 1}"
- "]}",
- *lpq));
+ ASSERT_OK(
+ isValid("{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {c: 1}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$geoNear: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {a: {$geoNear: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {$and: ["
- " {c: {$near: [0, 0]}},"
- " {d: 1}"
- " ]}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {$and: ["
+ " {c: {$near: [0, 0]}},"
+ " {d: 1}"
+ " ]}"
+ "]}",
+ *lpq));
// Invalid: GEO_NEAR inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$nor: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
// Invalid: GEO_NEAR inside OR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$or: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextAndGeo) {
@@ -247,13 +247,13 @@ TEST(CanonicalQueryTest, IsValidTextAndGeo) {
ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
// Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ],"
- " b: {$near: [0, 0]}}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ],"
+ " b: {$near: [0, 0]}}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
diff --git a/src/mongo/db/query/collation/collation_serializer_test.cpp b/src/mongo/db/query/collation/collation_serializer_test.cpp
index 60e1e63d9f6..fd961b158a0 100644
--- a/src/mongo/db/query/collation/collation_serializer_test.cpp
+++ b/src/mongo/db/query/collation/collation_serializer_test.cpp
@@ -44,13 +44,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesDefaults) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -62,13 +71,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesCaseFirstUpper) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "upper"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -80,13 +98,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesCaseFirstLower) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "lower"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -98,13 +125,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesPrimaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 1 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 1
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -116,13 +152,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesSecondaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 2 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 2
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -134,13 +179,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesQuaternaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 4 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 4
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -152,13 +206,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesIdenticalStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 5 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 5
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -170,13 +233,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesAlternateShifted) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "shifted"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -188,13 +260,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesMaxVariableSpace) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "space"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp
index f72731d6e9b..5a4470d4bf5 100644
--- a/src/mongo/db/query/collation/collator_factory_icu.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu.cpp
@@ -183,9 +183,13 @@ StatusWith<CollationSpec::CaseFirstType> stringToCaseFirstType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kCaseFirstField << "' must be '"
- << CollationSpec::kCaseFirstUpper << "', '"
- << CollationSpec::kCaseFirstLower << "', or '"
- << CollationSpec::kCaseFirstOff << "'. Got: " << caseFirst};
+ << CollationSpec::kCaseFirstUpper
+ << "', '"
+ << CollationSpec::kCaseFirstLower
+ << "', or '"
+ << CollationSpec::kCaseFirstOff
+ << "'. Got: "
+ << caseFirst};
}
}
@@ -204,7 +208,8 @@ StatusWith<CollationSpec::StrengthType> integerToStrengthType(long long strength
}
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kStrengthField
- << "' must be an integer 1 through 5. Got: " << strength};
+ << "' must be an integer 1 through 5. Got: "
+ << strength};
}
StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string& alternate) {
@@ -215,8 +220,11 @@ StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kAlternateField << "' must be '"
- << CollationSpec::kAlternateNonIgnorable << "' or '"
- << CollationSpec::kAlternateShifted << "'. Got: " << alternate};
+ << CollationSpec::kAlternateNonIgnorable
+ << "' or '"
+ << CollationSpec::kAlternateShifted
+ << "'. Got: "
+ << alternate};
}
}
@@ -228,8 +236,11 @@ StatusWith<CollationSpec::MaxVariableType> stringToMaxVariableType(const std::st
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kMaxVariableField << "' must be '"
- << CollationSpec::kMaxVariablePunct << "' or '"
- << CollationSpec::kMaxVariableSpace << "'. Got: " << maxVariable};
+ << CollationSpec::kMaxVariablePunct
+ << "' or '"
+ << CollationSpec::kMaxVariableSpace
+ << "'. Got: "
+ << maxVariable};
}
}
@@ -259,8 +270,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseLevelField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.caseLevel = attributeToBool(caseLevelAttribute);
} else if (!parseStatus.isOK()) {
@@ -274,8 +287,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseLevelField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -290,8 +305,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseFirstField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.caseFirst = getCaseFirstFromAttribute(caseFirstAttribute);
} else if (!parseStatus.isOK()) {
@@ -313,8 +330,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseFirstField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -329,8 +348,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kStrengthField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.strength = getStrengthFromAttribute(strengthAttribute);
} else if (!parseStatus.isOK()) {
@@ -351,8 +372,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kStrengthField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -368,8 +391,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNumericOrderingField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.numericOrdering = attributeToBool(numericOrderingAttribute);
} else if (!parseStatus.isOK()) {
@@ -384,8 +409,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNumericOrderingField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -401,8 +428,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kAlternateField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.alternate = getAlternateFromAttribute(alternateAttribute);
} else if (!parseStatus.isOK()) {
@@ -424,8 +453,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kAlternateField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -452,8 +483,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kMaxVariableField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -469,8 +502,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNormalizationField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.normalization = attributeToBool(normalizationAttribute);
} else if (!parseStatus.isOK()) {
@@ -485,8 +520,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNormalizationField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -502,8 +539,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kBackwardsField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.backwards = attributeToBool(backwardsAttribute);
} else if (!parseStatus.isOK()) {
@@ -518,8 +557,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kBackwardsField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -543,7 +584,8 @@ StatusWith<std::string> parseLocaleID(const BSONObj& spec) {
if (localeID.find('\0') != std::string::npos) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot contain null byte. Collation spec: " << spec};
+ << "' cannot contain null byte. Collation spec: "
+ << spec};
}
return localeID;
}
@@ -559,13 +601,15 @@ Status validateLocaleID(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get locale from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << ". Collation spec: "
+ << spec};
}
if (originalID.empty()) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot be the empty string in: " << spec};
+ << "' cannot be the empty string in: "
+ << spec};
}
// Check that each component of the locale ID is recognized by ICU. If ICU 1) cannot parse the
@@ -607,7 +651,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
return {ErrorCodes::FailedToParse,
str::stream() << "If " << CollationSpec::kLocaleField << "="
<< CollationSpec::kSimpleBinaryComparison
- << ", no other fields should be present in: " << spec};
+ << ", no other fields should be present in: "
+ << spec};
}
return {nullptr};
}
@@ -616,8 +661,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
auto userLocale = icu::Locale::createFromName(parsedLocaleID.getValue().c_str());
if (userLocale.isBogus()) {
return {ErrorCodes::BadValue,
- str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' is not valid in: " << spec};
+ str::stream() << "Field '" << CollationSpec::kLocaleField << "' is not valid in: "
+ << spec};
}
// Construct an icu::Collator.
@@ -628,7 +673,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to create collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << ". Collation spec: "
+ << spec};
}
Status localeValidationStatus = validateLocaleID(spec, parsedLocaleID.getValue(), *icuCollator);
diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
index 62d1432b899..46ddbf54d8b 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
@@ -37,9 +37,8 @@ namespace mongo {
namespace {
-MONGO_INITIALIZER_WITH_PREREQUISITES(CreateCollatorFactory,
- ("SetGlobalEnvironment",
- "LoadICUData"))(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(CreateCollatorFactory, ("SetGlobalEnvironment", "LoadICUData"))
+(InitializerContext* context) {
CollatorFactoryInterface::set(getGlobalServiceContext(),
stdx::make_unique<CollatorFactoryICU>());
return Status::OK();
diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
index db829fb359a..aa77665040f 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
@@ -59,7 +59,8 @@ TEST(CollatorFactoryICUTest, SimpleLocaleWithOtherFieldsFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "simple"
- << "caseLevel" << true));
+ << "caseLevel"
+ << true));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -354,8 +355,10 @@ TEST(CollatorFactoryICUTest, TaiwanLocaleWithCollationStrokeDisallowed) {
TEST(CollatorFactoryICUTest, LocaleWithValidLanguageCountryAndVariantAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US_POSIX")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US_POSIX"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, USLocaleWithCollationPhonebookDisallowed) {
@@ -368,14 +371,18 @@ TEST(CollatorFactoryICUTest, USLocaleWithCollationPhonebookDisallowed) {
TEST(CollatorFactoryICUTest, GermanLocaleWithCollationPhonebookAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de@collation=phonebook")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de@collation=phonebook"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, ChineseTraditionalLocaleWithCollationPinyinAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh_Hant@collation=pinyin")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh_Hant@collation=pinyin"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, LocaleStringCannotContainNullByte) {
@@ -435,7 +442,8 @@ TEST(CollatorFactoryICUTest, CaseLevelFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel" << false));
+ << "caseLevel"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().caseLevel);
}
@@ -444,7 +452,8 @@ TEST(CollatorFactoryICUTest, CaseLevelTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel" << true));
+ << "caseLevel"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().caseLevel);
}
@@ -486,7 +495,8 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1));
+ << "strength"
+ << 1));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -496,7 +506,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kSecondary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -506,7 +517,8 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 3));
+ << "strength"
+ << 3));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kTertiary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -516,7 +528,8 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 4));
+ << "strength"
+ << 4));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kQuaternary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -526,7 +539,8 @@ TEST(CollatorFactoryICUTest, IdenticalStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 5));
+ << "strength"
+ << 5));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kIdentical),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -536,7 +550,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << false));
+ << "numericOrdering"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().numericOrdering);
}
@@ -545,7 +560,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << true));
+ << "numericOrdering"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().numericOrdering);
}
@@ -598,7 +614,8 @@ TEST(CollatorFactoryICUTest, NormalizationFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization" << false));
+ << "normalization"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().normalization);
}
@@ -607,7 +624,8 @@ TEST(CollatorFactoryICUTest, NormalizationTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization" << true));
+ << "normalization"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().normalization);
}
@@ -616,7 +634,8 @@ TEST(CollatorFactoryICUTest, BackwardsFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards" << false));
+ << "backwards"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().backwards);
}
@@ -625,7 +644,8 @@ TEST(CollatorFactoryICUTest, BackwardsTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards" << true));
+ << "backwards"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().backwards);
}
@@ -634,7 +654,8 @@ TEST(CollatorFactoryICUTest, LongStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1LL));
+ << "strength"
+ << 1LL));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -644,7 +665,8 @@ TEST(CollatorFactoryICUTest, DoubleStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1.0));
+ << "strength"
+ << 1.0));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -664,7 +686,8 @@ TEST(CollatorFactoryICUTest, NonStringCaseFirstFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseFirst" << 1));
+ << "caseFirst"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -693,7 +716,8 @@ TEST(CollatorFactoryICUTest, TooLargeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2147483648LL));
+ << "strength"
+ << 2147483648LL));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -702,7 +726,8 @@ TEST(CollatorFactoryICUTest, FractionalStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 0.5));
+ << "strength"
+ << 0.5));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::BadValue);
}
@@ -711,7 +736,8 @@ TEST(CollatorFactoryICUTest, NegativeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << -1));
+ << "strength"
+ << -1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -720,7 +746,8 @@ TEST(CollatorFactoryICUTest, InvalidIntegerStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 6));
+ << "strength"
+ << 6));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -739,7 +766,8 @@ TEST(CollatorFactoryICUTest, NonStringAlternateFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "alternate" << 1));
+ << "alternate"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -758,7 +786,8 @@ TEST(CollatorFactoryICUTest, NonStringMaxVariableFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "maxVariable" << 1));
+ << "maxVariable"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -810,7 +839,8 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCollatorIgnoresCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1));
+ << "strength"
+ << 1));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -822,7 +852,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthCollatorsIgnoresCaseButNotAccents)
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -834,7 +865,8 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthCollatorConsidersCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 3));
+ << "strength"
+ << 3));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -846,7 +878,10 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "caseLevel" << true));
+ << "strength"
+ << 1
+ << "caseLevel"
+ << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -856,11 +891,14 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator =
- factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength" << 1 << "caseLevel" << true << "caseFirst"
- << "upper"));
+ auto collator = factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength"
+ << 1
+ << "caseLevel"
+ << true
+ << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -870,11 +908,14 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
TEST(CollatorFactoryICUTest, TertiaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator =
- factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength" << 3 << "caseLevel" << true << "caseFirst"
- << "upper"));
+ auto collator = factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength"
+ << 3
+ << "caseLevel"
+ << true
+ << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("A", "a"), 0);
}
@@ -891,7 +932,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << true));
+ << "numericOrdering"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("2", "10"), 0);
}
@@ -900,7 +942,9 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "alternate"
+ << "strength"
+ << 1
+ << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(collator.getValue()->compare("a b", "ab"), 0);
@@ -911,7 +955,9 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 4 << "alternate"
+ << "strength"
+ << 4
+ << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("a b", "ab"), 0);
@@ -922,7 +968,9 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShiftedMaxVariableSpace) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "alternate"
+ << "strength"
+ << 1
+ << "alternate"
<< "shifted"
<< "maxVariable"
<< "space"));
@@ -935,7 +983,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsFalse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -946,7 +995,10 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2 << "backwards" << true));
+ << "strength"
+ << 2
+ << "backwards"
+ << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -975,320 +1027,426 @@ TEST(CollatorFactoryICUTest, FactoryMadeCollatorComparisonKeysCorrectEnUS) {
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithArabicLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ar")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ar"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithArmenianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hy")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hy"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithBengaliLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "bn")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "bn"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCatalanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ca")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ca"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithChineseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithChineseTraditionalLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh_Hant")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh_Hant"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCroatianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCzechLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "cs")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "cs"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithDanishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "da")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "da"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishUnitedStatesLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishUnitedStatesComputerLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US_POSIX")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US_POSIX"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEstonianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "et")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "et"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFilipinoLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fil")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fil"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFinnishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFrenchLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFrenchCanadaLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fr_CA")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fr_CA"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGeorgianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ka")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ka"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGermanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGermanAustriaLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de_AT")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de_AT"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGreekLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "el")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "el"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHebrewLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "he")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "he"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHindiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHungarianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hu")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hu"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIcelandicLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "is")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "is"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIndonesianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "id")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "id"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIrishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ga")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ga"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithItalianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "it")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "it"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithJapaneseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ja")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ja"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithKoreanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ko")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ko"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithLatvianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "lv")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "lv"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithLithuanianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "lt")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "lt"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithNorwegianNynorskLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "nn")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "nn"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPashtoLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ps")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ps"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPersianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fa")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fa"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPersianAfghanistanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fa_AF")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fa_AF"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPolishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pl")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pl"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPortugueseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pt")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pt"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPunjabiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pa")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pa"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithRomanianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ro")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ro"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithRussianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ru")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ru"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSlovakLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sk")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sk"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSlovenianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sl")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sl"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSpanishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "es")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "es"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSwedishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sv")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sv"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithThaiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "th")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "th"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithTurkishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "tr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "tr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithUkrainianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "uk")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "uk"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithUrduLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ur")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ur"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithVietnameseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "vi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "vi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithAfrikaansLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "af")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "af"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithEsperantoLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "eo")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "eo"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithSwahiliLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "sw")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sw"))
+ .getStatus());
}
} // namespace
diff --git a/src/mongo/db/query/collation/collator_factory_mock.cpp b/src/mongo/db/query/collation/collator_factory_mock.cpp
index 503b84c7f79..f6d10450de9 100644
--- a/src/mongo/db/query/collation/collator_factory_mock.cpp
+++ b/src/mongo/db/query/collation/collator_factory_mock.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/query/collation/collator_factory_mock.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index cea7d0e3885..3b7ceeb400b 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/bson/json.h"
#include "mongo/db/query/count_request.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace {
@@ -41,7 +41,8 @@ TEST(CountRequest, ParseDefaults) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$lte" << 10))));
+ << "query"
+ << BSON("a" << BSON("$lte" << 10))));
ASSERT_OK(countRequestStatus.getStatus());
@@ -62,10 +63,17 @@ TEST(CountRequest, ParseComplete) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
- << 100 << "skip" << 1000 << "hint" << BSON("b" << 5)
- << "collation" << BSON("locale"
- << "en_US")));
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "limit"
+ << 100
+ << "skip"
+ << 1000
+ << "hint"
+ << BSON("b" << 5)
+ << "collation"
+ << BSON("locale"
+ << "en_US")));
ASSERT_OK(countRequestStatus.getStatus());
@@ -84,10 +92,17 @@ TEST(CountRequest, ParseNegativeLimit) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
- << -100 << "skip" << 1000 << "hint" << BSON("b" << 5)
- << "collation" << BSON("locale"
- << "en_US")));
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "limit"
+ << -100
+ << "skip"
+ << 1000
+ << "hint"
+ << BSON("b" << 5)
+ << "collation"
+ << BSON("locale"
+ << "en_US")));
ASSERT_OK(countRequestStatus.getStatus());
@@ -113,7 +128,9 @@ TEST(CountRequest, FailParseBadSkipValue) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "skip"
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "skip"
<< -1000));
ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
@@ -124,7 +141,8 @@ TEST(CountRequest, FailParseBadCollationValue) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11))
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
<< "collation"
<< "en_US"));
@@ -140,13 +158,13 @@ TEST(CountRequest, ToBSON) {
<< "en_US"));
BSONObj actualObj = countRequest.toBSON();
- BSONObj expectedObj(fromjson(
- "{ count : 'TestDB.TestColl',"
- " query : { a : { '$gte' : 11 } },"
- " limit : 100,"
- " skip : 1000,"
- " hint : { b : 5 },"
- " collation : { locale : 'en_US' } },"));
+ BSONObj expectedObj(
+ fromjson("{ count : 'TestDB.TestColl',"
+ " query : { a : { '$gte' : 11 } },"
+ " limit : 100,"
+ " skip : 1000,"
+ " hint : { b : 5 },"
+ " collation : { locale : 'en_US' } },"));
ASSERT_EQUALS(actualObj, expectedObj);
}
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index bf812d302a8..33a1661ea0b 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -123,24 +123,24 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
BSONElement cursorElt = cmdResponse[kCursorField];
if (cursorElt.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kCursorField
- << "' must be a nested object in: " << cmdResponse};
+ str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
+ << cmdResponse};
}
BSONObj cursorObj = cursorElt.Obj();
BSONElement idElt = cursorObj[kIdField];
if (idElt.type() != BSONType::NumberLong) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kIdField
- << "' must be of type long in: " << cmdResponse};
+ return {
+ ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kIdField << "' must be of type long in: " << cmdResponse};
}
cursorId = idElt.Long();
BSONElement nsElt = cursorObj[kNsField];
if (nsElt.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kNsField
- << "' must be of type string in: " << cmdResponse};
+ str::stream() << "Field '" << kNsField << "' must be of type string in: "
+ << cmdResponse};
}
fullns = nsElt.String();
@@ -152,16 +152,18 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
if (batchElt.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
- << kBatchField << "' in: " << cmdResponse};
+ << kBatchField
+ << "' in: "
+ << cmdResponse};
}
batchObj = batchElt.Obj();
std::vector<BSONObj> batch;
for (BSONElement elt : batchObj) {
if (elt.type() != BSONType::Object) {
- return {
- ErrorCodes::BadValue,
- str::stream() << "getMore response batch contains a non-object element: " << elt};
+ return {ErrorCodes::BadValue,
+ str::stream() << "getMore response batch contains a non-object element: "
+ << elt};
}
batch.push_back(elt.Obj().getOwned());
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 0229f526ca4..711d83d4213 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -37,11 +37,13 @@ namespace mongo {
namespace {
TEST(CursorResponseTest, parseFromBSONFirstBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -53,11 +55,13 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -69,11 +73,13 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(0) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(0) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -85,10 +91,13 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSONArrayBuilder().arr()) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSONArrayBuilder().arr())
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -109,16 +118,20 @@ TEST(CursorResponseTest, parseFromBSONCursorFieldWrongType) {
}
TEST(CursorResponseTest, parseFromBSONNsFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))) << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONNsFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))) << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -126,8 +139,10 @@ TEST(CursorResponseTest, parseFromBSONIdFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("ns"
<< "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -137,39 +152,50 @@ TEST(CursorResponseTest, parseFromBSONIdFieldWrongType) {
<< "123"
<< "ns"
<< "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONBatchFieldMissing) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll") << "ok" << 1));
+ << "db.coll")
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONFirstBatchFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch" << BSON("_id" << 1)) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON("_id" << 1))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONNextBatchFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON("_id" << 1)) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON("_id" << 1))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONOkFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
ASSERT_NOT_OK(result.getStatus());
}
@@ -186,11 +212,13 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::InitialResponse);
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -198,11 +226,13 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -214,11 +244,13 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
response.addToBSON(CursorResponse::ResponseType::InitialResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -230,11 +262,13 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
response.addToBSON(CursorResponse::ResponseType::SubsequentResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 4e0c82aaa81..6475b5aaa5f 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -39,13 +39,13 @@
#include "mongo/db/exec/multi_plan.h"
#include "mongo/db/exec/near.h"
#include "mongo/db/exec/text.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/stage_builder.h"
-#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp
index 6212c28016c..ff24396baeb 100644
--- a/src/mongo/db/query/expression_index.cpp
+++ b/src/mongo/db/query/expression_index.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/db/hasher.h"
#include "mongo/db/index/expression_params.h"
-#include "mongo/db/server_parameters.h"
#include "mongo/db/query/expression_index_knobs.h"
+#include "mongo/db/server_parameters.h"
#include "third_party/s2/s2cellid.h"
#include "third_party/s2/s2region.h"
#include "third_party/s2/s2regioncoverer.h"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 0888b5e7085..88f09fcfa19 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/query/explain.h"
@@ -55,6 +54,7 @@
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/stale_exception.h"
@@ -311,7 +311,9 @@ QueryResult::View getMore(OperationContext* txn,
// there for the cursor.
uassert(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
- << cursorid << " belongs to namespace " << cc->ns(),
+ << cursorid
+ << " belongs to namespace "
+ << cc->ns(),
ns == cc->ns());
*isCursorAuthorized = true;
@@ -504,9 +506,9 @@ std::string runQuery(OperationContext* txn,
auto statusWithCQ = CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackReal(txn, &nss));
if (!statusWithCQ.isOK()) {
- uasserted(
- 17287,
- str::stream() << "Can't canonicalize query: " << statusWithCQ.getStatus().toString());
+ uasserted(17287,
+ str::stream() << "Can't canonicalize query: "
+ << statusWithCQ.getStatus().toString());
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
invariant(cq.get());
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index f97810a90e0..c2330958e76 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -51,8 +51,8 @@
#include "mongo/db/exec/sort_key_generator.h"
#include "mongo/db/exec/subplan.h"
#include "mongo/db/exec/update.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/matcher/extensions_callback_real.h"
@@ -71,14 +71,14 @@
#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/stage_builder.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/server_options.h"
+#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/oplog_hack.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
@@ -774,7 +774,8 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
has pointers into it */
uassert(10156,
str::stream() << "cannot update system collection: " << ns << " q: " << patternOrig
- << " u: " << updateobj,
+ << " u: "
+ << updateobj,
legalClientSystemNS(ns, true));
}
}
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 0488aeccf7d..c15c7144370 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -26,16 +26,16 @@
* it in the license file.
*/
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/query/query_planner_params.h"
-#include "mongo/db/query/query_settings.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/ops/delete_request.h"
#include "mongo/db/ops/parsed_delete.h"
#include "mongo/db/ops/parsed_update.h"
#include "mongo/db/ops/update_driver.h"
#include "mongo/db/ops/update_request.h"
+#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/query/query_planner_params.h"
+#include "mongo/db/query/query_settings.h"
+#include "mongo/db/query/query_solution.h"
namespace mongo {
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 4662942294c..b1a97f32c5e 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -81,7 +81,8 @@ Status GetMoreRequest::isValid() const {
if (batchSize && *batchSize <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Batch size for getMore must be positive, "
- << "but received: " << *batchSize);
+ << "but received: "
+ << *batchSize);
}
return Status::OK();
@@ -122,8 +123,8 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (str::equals(fieldName, kCollectionField)) {
if (el.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream()
- << "Field 'collection' must be of type string in: " << cmdObj};
+ str::stream() << "Field 'collection' must be of type string in: "
+ << cmdObj};
}
fullns = parseNs(dbname, cmdObj);
@@ -159,7 +160,9 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (!str::startsWith(fieldName, "$")) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '" << fieldName << "'."};
+ << "Unrecognized field '"
+ << fieldName
+ << "'."};
}
}
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index 997310c9645..94dfb9ba366 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -30,9 +30,9 @@
#include <string>
-#include "mongo/db/repl/optime.h"
-#include "mongo/db/query/getmore_request.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/query/getmore_request.h"
+#include "mongo/db/repl/optime.h"
#include "mongo/unittest/unittest.h"
@@ -60,7 +60,8 @@ TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
BSON("getMore"
<< "not a number"
- << "collection" << 123));
+ << "collection"
+ << 123));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
}
@@ -115,7 +116,8 @@ TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "unknown_field" << 1));
+ << "unknown_field"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
}
@@ -125,7 +127,8 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << -1));
+ << "batchSize"
+ << -1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -135,7 +138,8 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << 0));
+ << "batchSize"
+ << 0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -156,7 +160,8 @@ TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << 200));
+ << "batchSize"
+ << 200));
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
ASSERT(result.getValue().batchSize);
@@ -180,7 +185,8 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMS) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS" << 100));
+ << "maxTimeMS"
+ << 100));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT(result.getValue().awaitDataTimeout);
@@ -193,7 +199,8 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMSOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS" << 0));
+ << "maxTimeMS"
+ << 0));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
@@ -208,7 +215,8 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize" << 99);
+ << "batchSize"
+ << 99);
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -231,7 +239,10 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize" << 99 << "term" << 1);
+ << "batchSize"
+ << 99
+ << "term"
+ << 1);
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -243,11 +254,14 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
1,
repl::OpTime(Timestamp(0, 10), 2));
BSONObj requestObj = request.toBSON();
- BSONObj expectedRequest =
- BSON("getMore" << CursorId(123) << "collection"
- << "testcoll"
- << "batchSize" << 99 << "term" << 1 << "lastKnownCommittedOpTime"
- << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
+ BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
+ << "testcoll"
+ << "batchSize"
+ << 99
+ << "term"
+ << 1
+ << "lastKnownCommittedOpTime"
+ << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -261,7 +275,8 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS" << 789);
+ << "maxTimeMS"
+ << 789);
ASSERT_EQ(requestObj, expectedRequest);
}
diff --git a/src/mongo/db/query/index_bounds_builder.h b/src/mongo/db/query/index_bounds_builder.h
index b37901f6d2d..5d3c02e029f 100644
--- a/src/mongo/db/query/index_bounds_builder.h
+++ b/src/mongo/db/query/index_bounds_builder.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/hasher.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/index_entry.h"
diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp
index 4d93b8b63c9..af346b5f908 100644
--- a/src/mongo/db/query/index_bounds_builder_test.cpp
+++ b/src/mongo/db/query/index_bounds_builder_test.cpp
@@ -571,9 +571,8 @@ TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ Interval(fromjson("{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
true,
true)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -593,9 +592,8 @@ TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ Interval(fromjson("{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
true,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -615,9 +613,8 @@ TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
+ Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
false,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -637,9 +634,8 @@ TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
+ Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
true,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
diff --git a/src/mongo/db/query/index_bounds_test.cpp b/src/mongo/db/query/index_bounds_test.cpp
index 75b49f6429e..250563b54ce 100644
--- a/src/mongo/db/query/index_bounds_test.cpp
+++ b/src/mongo/db/query/index_bounds_test.cpp
@@ -32,13 +32,13 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/query/index_bounds.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
+#include "mongo/db/query/index_bounds.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/text.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/text.h"
using namespace mongo;
diff --git a/src/mongo/db/query/killcursors_request.cpp b/src/mongo/db/query/killcursors_request.cpp
index 6d95311accd..c446998eaa1 100644
--- a/src/mongo/db/query/killcursors_request.cpp
+++ b/src/mongo/db/query/killcursors_request.cpp
@@ -66,8 +66,8 @@ StatusWith<KillCursorsRequest> KillCursorsRequest::parseFromBSON(const std::stri
if (cmdObj[kCursorsField].type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kCursorsField
- << "' must be of type array in: " << cmdObj};
+ str::stream() << "Field '" << kCursorsField << "' must be of type array in: "
+ << cmdObj};
}
std::vector<CursorId> cursorIds;
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index 74ce8bfa31e..19c220a7408 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -94,7 +94,8 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldNotArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors" << CursorId(123)));
+ << "cursors"
+ << CursorId(123)));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -104,7 +105,8 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldEmptyArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors" << BSONArrayBuilder().arr()));
+ << "cursors"
+ << BSONArrayBuilder().arr()));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
}
@@ -129,7 +131,8 @@ TEST(KillCursorsRequestTest, toBSON) {
BSONObj requestObj = request.toBSON();
BSONObj expectedObj = BSON("killCursors"
<< "coll"
- << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
+ << "cursors"
+ << BSON_ARRAY(CursorId(123) << CursorId(456)));
ASSERT_EQ(requestObj, expectedObj);
}
diff --git a/src/mongo/db/query/killcursors_response.cpp b/src/mongo/db/query/killcursors_response.cpp
index 0484a1100fa..2cf7d998d8f 100644
--- a/src/mongo/db/query/killcursors_response.cpp
+++ b/src/mongo/db/query/killcursors_response.cpp
@@ -50,8 +50,8 @@ Status fillOutCursorArray(const BSONObj& cmdResponse,
if (elt.type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << fieldName
- << "' must be of type array in: " << cmdResponse};
+ str::stream() << "Field '" << fieldName << "' must be of type array in: "
+ << cmdResponse};
}
for (BSONElement cursorElt : elt.Obj()) {
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index e2a56af98c4..0fe3d996edf 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -40,9 +40,13 @@ namespace {
TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown" << BSONArray() << "ok" << 1.0));
+ << "cursorsUnknown"
+ << BSONArray()
+ << "ok"
+ << 1.0));
ASSERT_OK(result.getStatus());
KillCursorsResponse response = result.getValue();
ASSERT_EQ(response.cursorsKilled.size(), 1U);
@@ -60,8 +64,11 @@ TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
TEST(KillCursorsResponseTest, parseFromBSONSuccessOmitCursorsAlive) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsUnknown"
- << BSON_ARRAY(CursorId(789)) << "ok" << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsUnknown"
+ << BSON_ARRAY(CursorId(789))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -76,11 +83,13 @@ TEST(KillCursorsResponseTest, parseFromBSONCommandNotOk) {
}
TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
- StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(BSON(
- "cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << "foobar"
- << "cursorsAlive" << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "ok" << 1.0));
+ StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
+ BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
+ << "foobar"
+ << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -88,8 +97,11 @@ TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
TEST(KillCursorsResponseTest, parseFromBSONArrayContainsInvalidElement) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9)) << "ok" << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -103,9 +115,13 @@ TEST(KillCursorsResponseTest, toBSON) {
BSONObj responseObj = response.toBSON();
BSONObj expectedResponse =
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown" << BSONArray() << "ok" << 1.0);
+ << "cursorsUnknown"
+ << BSONArray()
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/lite_parsed_query.cpp
index 00ad7091397..4e7e036a4cc 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
@@ -351,7 +351,9 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
} else if (!str::startsWith(fieldName, '$')) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '" << fieldName << "'.");
+ << "Unrecognized field '"
+ << fieldName
+ << "'.");
}
}
@@ -553,32 +555,32 @@ Status LiteParsedQuery::validate() const {
if (_limit && *_limit < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Limit value must be non-negative, but received: " << *_limit);
+ str::stream() << "Limit value must be non-negative, but received: "
+ << *_limit);
}
if (_batchSize && *_batchSize < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "BatchSize value must be non-negative, but received: " << *_batchSize);
+ str::stream() << "BatchSize value must be non-negative, but received: "
+ << *_batchSize);
}
if (_ntoreturn && *_ntoreturn < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
+ str::stream() << "NToReturn value must be non-negative, but received: "
+ << *_ntoreturn);
}
if (_maxScan < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "MaxScan value must be non-negative, but received: " << _maxScan);
+ str::stream() << "MaxScan value must be non-negative, but received: "
+ << _maxScan);
}
if (_maxTimeMS < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
+ str::stream() << "MaxTimeMS value must be non-negative, but received: "
+ << _maxTimeMS);
}
if (_tailable) {
@@ -618,9 +620,10 @@ StatusWith<int> LiteParsedQuery::parseMaxTimeMS(BSONElement maxTimeMSElt) {
}
double maxTimeMSDouble = maxTimeMSElt.numberDouble();
if (maxTimeMSElt.type() == mongo::NumberDouble && floor(maxTimeMSDouble) != maxTimeMSDouble) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder() << maxTimeMSElt.fieldNameStringData()
- << " has non-integral value").str());
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " has non-integral value")
+ .str());
}
return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
}
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
index d94ec230cb6..17affe6f9c3 100644
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ b/src/mongo/db/query/lite_parsed_query_test.cpp
@@ -346,9 +346,9 @@ TEST(LiteParsedQueryTest, ValidateSortOrder) {
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson(
- "{a: {$meta: \"textScore\","
- " b: 1}}")));
+ ASSERT_FALSE(
+ LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
+ " b: 1}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
}
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index 90f1b9bfb5c..b1b1b59324f 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -28,11 +28,11 @@
#include "mongo/db/query/parsed_projection.h"
-#include <memory>
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/unittest/unittest.h"
+#include <memory>
namespace {
@@ -56,8 +56,10 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const
Status status = ParsedProjection::make(
projObj, queryMatchExpr.get(), &out, ExtensionsCallbackDisallowExtensions());
if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj << " (query: "
+ << query
+ << "): "
+ << status.toString());
}
ASSERT(out);
return unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index ab64b8f1d26..ca1f93d9d6f 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -32,19 +32,19 @@
#include "mongo/db/query/plan_cache.h"
-#include <algorithm>
-#include <math.h>
-#include <memory>
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/plan_ranker.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_knobs.h"
+#include "mongo/db/query/query_solution.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include <algorithm>
+#include <math.h>
+#include <memory>
namespace mongo {
namespace {
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 02ab8ef64cb..332e7b79cea 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -28,8 +28,8 @@
#pragma once
-#include <set>
#include <boost/optional/optional.hpp>
+#include <set>
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 4e61e9ba595..066d7a2782b 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -30,14 +30,14 @@
#include "mongo/db/query/plan_cache_indexability.h"
-#include <memory>
#include "mongo/base/init.h"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/query/index_entry.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_algo.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/query/index_entry.h"
#include "mongo/stdx/memory.h"
+#include <memory>
namespace mongo {
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index bfc1d786878..e5db935d3a3 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -42,8 +42,8 @@ std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
StatusWithMatchExpression status =
MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator);
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
+ << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 4192f748362..708ff2b69e6 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/query/plan_cache.h"
#include <algorithm>
-#include <ostream>
#include <memory>
+#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
@@ -318,9 +318,9 @@ TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
* the planner is able to come up with a cacheable solution.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
assertShouldCacheQuery(*cq);
}
@@ -328,10 +328,10 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
* $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
assertShouldCacheQuery(*cq);
}
@@ -339,11 +339,11 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
* $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0, 0], [0, 90], "
- "[90, 0], [0, 0]]]}}}},"
- "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0, 0], [0, 90], "
+ "[90, 0], [0, 0]]]}}}},"
+ "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
assertShouldCacheQuery(*cq);
}
@@ -351,10 +351,10 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
* $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoIntersects: "
- "{$geometry: {type: 'Point', coordinates: "
- "[10.0, 10.0]}}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoIntersects: "
+ "{$geometry: {type: 'Point', coordinates: "
+ "[10.0, 10.0]}}}}"));
assertShouldCacheQuery(*cq);
}
@@ -363,9 +363,9 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
* between flat and spherical queries.
*/
TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoNear: {$geometry: {type: 'Point',"
- "coordinates: [0,0]}, $maxDistance:100}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
+ "coordinates: [0,0]}, $maxDistance:100}}}"));
assertShouldCacheQuery(*cq);
}
@@ -1279,14 +1279,14 @@ TEST(PlanCacheTest, ComputeKeyGeoWithin) {
PlanCache planCache;
// Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(canonicalize(
- "{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
+ unique_ptr<CanonicalQuery> cqLegacy(
+ canonicalize("{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
// GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(canonicalize(
- "{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ unique_ptr<CanonicalQuery> cqNew(
+ canonicalize("{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
}
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index c6fe53a1c37..4b843baabe4 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -32,8 +32,8 @@
#include <set>
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/util/log.h"
#include "mongo/util/string_map.h"
@@ -332,8 +332,7 @@ bool PlanEnumerator::getNext(MatchExpression** tree) {
sortUsingTags(*tree);
_root->resetTag();
- LOG(5) << "Enumerator: memo just before moving:" << endl
- << dumpMemo();
+ LOG(5) << "Enumerator: memo just before moving:" << endl << dumpMemo();
_done = nextMemo(memoIDForNode(_root));
return true;
}
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 84602dc3c29..4a121926b9e 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/exec/subplan.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/query/plan_yield_policy.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
@@ -556,7 +556,8 @@ Status PlanExecutor::executePlan() {
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
- << ", state: " << PlanExecutor::statestr(state));
+ << ", state: "
+ << PlanExecutor::statestr(state));
}
invariant(PlanExecutor::IS_EOF == state);
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index d4f68eb0602..9836e980fbb 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -32,8 +32,8 @@
#include <algorithm>
#include <cmath>
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/db/query/plan_ranker.h"
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index 9b28a933131..c3172d5387d 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -39,9 +39,9 @@
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index b789395cb44..1a507d16ef2 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -33,12 +33,12 @@
#include <set>
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_geo.h"
-#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_planner.h"
+#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index 78b82845afa..c02c9c25cfb 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -90,16 +90,14 @@ TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
ASSERT_EQUALS(fromjson("{a: 1}"),
QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson(
- "{a: 1, b: '2dsphere',"
- " c: 1}")));
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
+ " c: 1}")));
ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson(
- "{a: 1, b: 1, c: 'text',"
- " d: 1}")));
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
+ " d: 1}")));
}
// Test the generation of sort orders provided by an index scan done by
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index 8af46a211af..7c32d516260 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/collation/collator_interface.h"
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index 14c46e47970..098c921e5bd 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/query/planner_ixselect.h"
-#include <memory>
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
@@ -40,6 +39,7 @@
#include "mongo/db/query/index_tag.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/text.h"
+#include <memory>
using namespace mongo;
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 6560e54a125..faad7ca6aae 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -40,10 +40,10 @@
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_enumerator.h"
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_ixselect.h"
-#include "mongo/db/query/plan_enumerator.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/util/log.h"
@@ -398,8 +398,7 @@ Status QueryPlanner::planFromCache(const CanonicalQuery& query,
// The planner requires a defined sort order.
sortUsingTags(clone.get());
- LOG(5) << "Tagged tree:" << endl
- << clone->toString();
+ LOG(5) << "Tagged tree:" << endl << clone->toString();
// Use the cached index assignments to build solnRoot.
QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
@@ -415,8 +414,8 @@ Status QueryPlanner::planFromCache(const CanonicalQuery& query,
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
if (!soln) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Failed to analyze plan from cache. Query: " << query.toStringShort());
+ str::stream() << "Failed to analyze plan from cache. Query: "
+ << query.toStringShort());
}
LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
@@ -677,8 +676,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
}
// query.root() is now annotated with RelevantTag(s).
- LOG(5) << "Rated tree:" << endl
- << query.root()->toString();
+ LOG(5) << "Rated tree:" << endl << query.root()->toString();
// If there is a GEO_NEAR it must have an index it can use directly.
const MatchExpression* gnNode = NULL;
@@ -744,8 +742,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
MatchExpression* rawTree;
while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
- LOG(5) << "About to build solntree from tagged tree:" << endl
- << rawTree->toString();
+ LOG(5) << "About to build solntree from tagged tree:" << endl << rawTree->toString();
// The tagged tree produced by the plan enumerator is not guaranteed
// to be canonically sorted. In order to be compatible with the cached
@@ -771,8 +768,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
if (NULL != soln) {
- LOG(5) << "Planner: adding solution:" << endl
- << soln->toString();
+ LOG(5) << "Planner: adding solution:" << endl << soln->toString();
if (indexTreeStatus.isOK()) {
SolutionCacheData* scd = new SolutionCacheData();
scd->tree.reset(autoData.release());
@@ -918,8 +914,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
collscan->cacheData.reset(scd);
out->push_back(collscan);
- LOG(5) << "Planner: outputting a collscan:" << endl
- << collscan->toString();
+ LOG(5) << "Planner: outputting a collscan:" << endl << collscan->toString();
}
}
diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp
index d5f7fa741a3..3bc52263b13 100644
--- a/src/mongo/db/query/query_planner_array_test.cpp
+++ b/src/mongo/db/query/query_planner_array_test.cpp
@@ -92,9 +92,9 @@ TEST_F(QueryPlannerTest, AllElemMatchCompound) {
// true means multikey
addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson(
- "{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
- "{$elemMatch: {b: 3, c: 3}}]}}"));
+ runQuery(
+ fromjson("{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
+ "{$elemMatch: {b: 3, c: 3}}]}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -121,9 +121,9 @@ TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
// true means multikey
addIndex(BSON("a.b.c.d" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: {$all: "
- "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: {$all: "
+ "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -202,9 +202,9 @@ TEST_F(QueryPlannerTest, ElemMatchNested) {
TEST_F(QueryPlannerTest, TwoElemMatchNested) {
addIndex(BSON("a.d.e" << 1));
addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson(
- "{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
- "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
+ runQuery(
+ fromjson("{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
+ "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
ASSERT_EQUALS(getNumSolutions(), 3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -486,9 +486,9 @@ TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
// The bounds can be compounded because the index is not multikey.
TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
- runQuery(fromjson(
- "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -505,9 +505,9 @@ TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
// true means multikey
addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson(
- "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -535,9 +535,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
// the index is not multikey.
TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsNotMultikey) {
addIndex(BSON("a.b" << 1));
- runQuery(fromjson(
- "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -554,9 +554,9 @@ TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsNotMultikey) {
TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsMultikey) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -848,9 +848,9 @@ TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
// true means multikey
addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -866,9 +866,9 @@ TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
// true means multikey
addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -889,9 +889,9 @@ TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
addIndex(BSON("a.b.startDate" << 1), true);
addIndex(BSON("a.b.endDate" << 1), true);
- runQuery(fromjson(
- "{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
- "endDate: {$gt: 6}}}}"));
+ runQuery(
+ fromjson("{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
+ "endDate: {$gt: 6}}}}"));
assertNumSolutions(6U);
@@ -1071,9 +1071,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchAll) {
// SERVER-16042
TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound) {
addIndex(BSON("a.b" << 1 << "c" << 1), true);
- runQuery(fromjson(
- "{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
- "{$elemMatch: {b: {$lt: 0}}}]}, c: 3}"));
+ runQuery(
+ fromjson("{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
+ "{$elemMatch: {b: {$lt: 0}}}]}, c: 3}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1090,9 +1090,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound) {
// SERVER-16042
TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound2) {
addIndex(BSON("a.b" << 1 << "c" << 1), true);
- runQuery(fromjson(
- "{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
- "{$elemMatch: {b: {$lt: 0}}}]}, c: {$gte: 3, $lte: 4}}"));
+ runQuery(
+ fromjson("{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
+ "{$elemMatch: {b: {$lt: 0}}}]}, c: {$gte: 3, $lte: 4}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1348,9 +1348,9 @@ TEST_F(QueryPlannerTest, CannotIntersectBoundsOfTwoSeparateElemMatches) {
MultikeyPaths multikeyPaths{{0U}, {0U}};
addIndex(BSON("a.b" << 1 << "a.c" << 1), multikeyPaths);
- runQuery(fromjson(
- "{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
- "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
+ "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists(
diff --git a/src/mongo/db/query/query_planner_collation_test.cpp b/src/mongo/db/query/query_planner_collation_test.cpp
index 7f9d784e6c5..996c644e609 100644
--- a/src/mongo/db/query/query_planner_collation_test.cpp
+++ b/src/mongo/db/query/query_planner_collation_test.cpp
@@ -90,9 +90,9 @@ TEST_F(QueryPlannerTest, StringComparisonAndNonStringComparisonCanUseSeparateInd
// The string predicate can use index {a: 1}, since the collators match. The non-string
// comparison can use index {b: 1}, even though the collators don't match.
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {a: {$lt: 'foo'}, b: {$lte: 4}}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {a: {$lt: 'foo'}, b: {$lte: 4}}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -189,9 +189,9 @@ TEST_F(QueryPlannerTest, OrQueryResultsInCollscanWhenOnlyOneBranchHasIndexWithMa
addIndex(fromjson("{a: 1}"), &reverseStringCollator);
addIndex(fromjson("{b: 1}"), &alwaysEqualCollator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(1U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -202,9 +202,9 @@ TEST_F(QueryPlannerTest, OrQueryCanBeIndexedWhenBothBranchesHaveIndexWithMatchin
addIndex(fromjson("{a: 1}"), &collator);
addIndex(fromjson("{b: 1}"), &collator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index f4218f20fb9..9e087444079 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -75,10 +75,10 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
addIndex(BSON("loc"
<< "2dsphere"));
- runQuery(fromjson(
- "{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
+ runQuery(
+ fromjson("{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {loc: '2dsphere'}, "
@@ -88,11 +88,12 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
TEST_F(QueryPlannerTest, Basic2DCompound) {
addIndex(BSON("loc"
<< "2d"
- << "a" << 1));
+ << "a"
+ << 1));
- runQuery(fromjson(
- "{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
- "a: 'mouse' }"));
+ runQuery(
+ fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
+ "a: 'mouse' }"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
@@ -110,10 +111,10 @@ TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
+ runQuery(
+ fromjson("{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {loc: '2dsphere'}, "
@@ -125,9 +126,9 @@ TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
+ runQuery(
+ fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
@@ -147,9 +148,9 @@ TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
+ runQuery(
+ fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
@@ -182,9 +183,9 @@ TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}");
- runQuery(fromjson(
- "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
+ runQuery(
+ fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -204,9 +205,9 @@ TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}");
- runQuery(fromjson(
- "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
+ runQuery(
+ fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -277,9 +278,9 @@ TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
addIndex(BSON("b" << 1 << "a"
<< "2dsphere"));
- runQuery(fromjson(
- "{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
- " b: {$exists: false}}"));
+ runQuery(
+ fromjson("{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
+ " b: {$exists: false}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -294,9 +295,9 @@ TEST_F(QueryPlannerTest, Or2DNonNear) {
<< "2d"));
addIndex(BSON("b"
<< "2d"));
- runQuery(fromjson(
- "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -309,9 +310,9 @@ TEST_F(QueryPlannerTest, Or2DNonNear) {
TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -344,10 +345,10 @@ TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
addIndex(BSON("b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -360,9 +361,9 @@ TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -374,9 +375,9 @@ TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $near : [ 5, 5 ] } } ]}"));
+ runQuery(
+ fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $near : [ 5, 5 ] } } ]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -388,9 +389,9 @@ TEST_F(QueryPlannerTest, And2DWith2DNearSameFieldMultikey) {
addIndex(BSON("geo"
<< "2d"),
multikey);
- runQuery(fromjson(
- "{$and: [{geo: {$near: [0, 0]}}, "
- "{geo: {$within: {$polygon: [[0, 0], [1, 0], [1, 1]]}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$near: [0, 0]}}, "
+ "{geo: {$within: {$polygon: [[0, 0], [1, 0], [1, 1]]}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -402,11 +403,11 @@ TEST_F(QueryPlannerTest, And2DWith2DNearSameFieldMultikey) {
TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -420,11 +421,11 @@ TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -434,11 +435,11 @@ TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -450,11 +451,11 @@ TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -466,11 +467,11 @@ TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -482,11 +483,11 @@ TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -498,9 +499,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -513,9 +514,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -532,9 +533,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
addIndex(BSON("c" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(2U);
assertSolutionExists(
@@ -552,9 +553,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
addIndex(BSON("a" << 1 << "b" << 1 << "c"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -569,10 +570,10 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
addIndex(BSON("a" << 1 << "b" << 1 << "c"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: 1, b: 6, $and: ["
- "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
- "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
+ runQuery(
+ fromjson("{a: 1, b: 6, $and: ["
+ "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
+ "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -585,12 +586,15 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
// true means multikey
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"
- << "c" << 1 << "d" << 1),
+ << "c"
+ << 1
+ << "d"
+ << 1),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
- "b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
+ "b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -604,7 +608,8 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
// true means multikey
addIndex(BSON("a"
<< "2d"
- << "b" << 1),
+ << "b"
+ << 1),
true);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
@@ -698,9 +703,8 @@ TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
addIndex(BSON("x"
<< "2dsphere"));
- runQuerySortProj(fromjson(
- "{x: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
+ runQuerySortProj(fromjson("{x: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
BSON("x" << 1),
BSONObj());
@@ -727,9 +731,8 @@ TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
addIndex(BSON("x" << 1 << "y"
<< "2dsphere"));
- runQuerySortProj(fromjson(
- "{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
+ runQuerySortProj(fromjson("{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
BSON("x" << 1),
BSONObj());
@@ -768,19 +771,19 @@ TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}}}");
- runQuery(fromjson(
- "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -798,19 +801,19 @@ TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}}}");
- runQuery(fromjson(
- "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -1065,10 +1068,10 @@ TEST_F(QueryPlannerGeo2dsphereTest, CannotIntersectBoundsOfTwoSeparateElemMatche
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
- "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}, "
- "{'a.geo': {$nearSphere: [0, 0]}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
+ "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}, "
+ "{'a.geo': {$nearSphere: [0, 0]}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1096,7 +1099,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{1U}, {1U}, {1U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1126,7 +1132,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1157,7 +1166,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {b: 2, c: 3}}}"));
@@ -1189,7 +1201,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U, 1U}, {0U, 1U}, {0U, 1U}};
addIndex(BSON("a.b.geo"
<< "2dsphere"
- << "a.b.c" << 1 << "a.b.d" << 1),
+ << "a.b.c"
+ << 1
+ << "a.b.d"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.b.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {'b.c': 2, 'b.d': 3}}}"));
@@ -1207,9 +1222,9 @@ TEST_F(QueryPlannerGeo2dsphereTest, CanIntersectBoundsOn2dsphereFieldWhenItIsNot
addIndex(BSON("geo"
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{geo: {$nearSphere: [0, 0]}}, "
- "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$nearSphere: [0, 0]}}, "
+ "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1223,9 +1238,9 @@ TEST_F(QueryPlannerGeo2dsphereTest, CannotIntersectBoundsOn2dsphereFieldWhenItIs
addIndex(BSON("geo"
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{geo: {$nearSphere: [0, 0]}}, "
- "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$nearSphere: [0, 0]}}, "
+ "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1353,7 +1368,8 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDNearCompound) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo"
<< "2dsphere"
- << "nongeo" << 1)};
+ << "nongeo"
+ << 1)};
BSONObj predicate = fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}");
testMultiple2dsphereIndexVersions(versions, keyPatterns, predicate, 1U);
}
@@ -1364,10 +1380,16 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowOr) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo1"
<< "2dsphere"
- << "a" << 1 << "b" << 1),
+ << "a"
+ << 1
+ << "b"
+ << 1),
BSON("geo2"
<< "2dsphere"
- << "a" << 1 << "b" << 1)};
+ << "a"
+ << 1
+ << "b"
+ << 1)};
BSONObj predicate = fromjson(
"{a: 4, b: 5, $or: ["
@@ -1389,7 +1411,8 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowElemMatch) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("a.b"
<< "2dsphere"
- << "a.c" << 1)};
+ << "a.c"
+ << 1)};
BSONObj predicate = fromjson(
"{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 108d4002195..de70d351035 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -666,9 +666,9 @@ TEST_F(QueryPlannerTest, OrOfAnd3) {
// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
TEST_F(QueryPlannerTest, OrOfAnd4) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson(
- "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+ runQuery(
+ fromjson("{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -684,9 +684,9 @@ TEST_F(QueryPlannerTest, OrOfAnd4) {
// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
TEST_F(QueryPlannerTest, OrOfAnd5) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson(
- "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+ runQuery(
+ fromjson("{$or: [{a:{$gt:1,$lt:5}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -870,9 +870,9 @@ TEST_F(QueryPlannerTest, OrInexactWithExact2) {
// SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
addIndex(BSON("names" << 1));
- runQuery(fromjson(
- "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+ runQuery(
+ fromjson("{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -887,9 +887,9 @@ TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
// true means multikey
addIndex(BSON("names" << 1), true);
- runQuery(fromjson(
- "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+ runQuery(
+ fromjson("{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -917,9 +917,9 @@ TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
TEST_F(QueryPlannerTest, OrElemMatchObject) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+ runQuery(
+ fromjson("{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -935,9 +935,9 @@ TEST_F(QueryPlannerTest, OrElemMatchObject) {
TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+ runQuery(
+ fromjson("{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -988,9 +988,9 @@ TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
addIndex(BSON("name" << 1));
- runQuery(fromjson(
- "{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}"));
+ runQuery(
+ fromjson("{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1005,9 +1005,9 @@ TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
- "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
+ runQuery(
+ fromjson("{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
+ "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1420,7 +1420,8 @@ TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
addIndex(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
ASSERT_EQUALS(getNumSolutions(), 1U);
@@ -1766,10 +1767,9 @@ TEST_F(QueryPlannerTest, ManyInWithSort) {
// SERVER-1205
TEST_F(QueryPlannerTest, TooManyToExplode) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipNToReturn(fromjson(
- "{a: {$in: [1,2,3,4,5,6]},"
- "b:{$in:[1,2,3,4,5,6,7,8]},"
- "c:{$in:[1,2,3,4,5,6,7,8]}}"),
+ runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1,2,3,4,5,6]},"
+ "b:{$in:[1,2,3,4,5,6,7,8]},"
+ "c:{$in:[1,2,3,4,5,6,7,8]}}"),
BSON("d" << 1),
BSONObj(),
0,
@@ -1962,11 +1962,10 @@ TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
addIndex(BSON("b" << 1 << "e" << 1));
addIndex(BSON("c" << 1 << "e" << 1));
addIndex(BSON("d" << 1 << "e" << 1));
- runQuerySortProj(fromjson(
- "{$or: [{a: {$in: [1,2,3,4,5,6]},"
- "b: {$in: [1,2,3,4,5,6]}},"
- "{c: {$in: [1,2,3,4,5,6]},"
- "d: {$in: [1,2,3,4,5,6]}}]}"),
+ runQuerySortProj(fromjson("{$or: [{a: {$in: [1,2,3,4,5,6]},"
+ "b: {$in: [1,2,3,4,5,6]}},"
+ "{c: {$in: [1,2,3,4,5,6]},"
+ "d: {$in: [1,2,3,4,5,6]}}]}"),
BSON("e" << 1),
BSONObj());
@@ -2004,9 +2003,8 @@ TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson(
- "{$and: [{b: {$regex: 'foo', $options: 'i'}},"
- "{a: {$in: [1, 2]}}]}"),
+ runQuerySortProj(fromjson("{$and: [{b: {$regex: 'foo', $options: 'i'}},"
+ "{a: {$in: [1, 2]}}]}"),
BSON("b" << 1),
BSONObj());
@@ -2106,9 +2104,9 @@ TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("arr.x" << 1 << "a" << 1));
- runQuery(fromjson(
- "{arr: { $elemMatch : { x : 5 , y : 5 } },"
- " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
+ runQuery(
+ fromjson("{arr: { $elemMatch : { x : 5 , y : 5 } },"
+ " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
// 2 indexed solns and one non-indexed
ASSERT_EQUALS(getNumSolutions(), 3U);
@@ -2763,9 +2761,9 @@ TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
TEST_F(QueryPlannerTest, TwoNegatedRanges) {
addIndex(BSON("i" << 1));
- runQuery(fromjson(
- "{$and: [{i: {$not: {$lte: 'b'}}}, "
- "{i: {$not: {$gte: 'f'}}}]}"));
+ runQuery(
+ fromjson("{$and: [{i: {$not: {$lte: 'b'}}}, "
+ "{i: {$not: {$gte: 'f'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -3273,14 +3271,14 @@ TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
addIndex(BSON("b" << 1));
addIndex(BSON("c" << 1));
addIndex(BSON("d" << 1));
- runQuery(fromjson(
- "{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
+ runQuery(
+ fromjson("{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
@@ -3549,13 +3547,13 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit) {
// 6 $or clauses, each with 2 indexed predicates
// means 2^6 = 64 possibilities. We should hit the limit.
- runQuery(fromjson(
- "{$or: [{a: 1, b: 1},"
- "{a: 2, b: 2},"
- "{a: 3, b: 3},"
- "{a: 4, b: 4},"
- "{a: 5, b: 5},"
- "{a: 6, b: 6}]}"));
+ runQuery(
+ fromjson("{$or: [{a: 1, b: 1},"
+ "{a: 2, b: 2},"
+ "{a: 3, b: 3},"
+ "{a: 4, b: 4},"
+ "{a: 5, b: 5},"
+ "{a: 6, b: 6}]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
@@ -3569,10 +3567,10 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
// 3 $or clauses, and a few other preds. Each $or clause can
// generate up to the max number of allowed $or enumerations.
- runQuery(fromjson(
- "{$or: [{a: 1, b: 1, c: 1, d: 1},"
- "{a: 2, b: 2, c: 2, d: 2},"
- "{a: 3, b: 3, c: 3, d: 3}]}"));
+ runQuery(
+ fromjson("{$or: [{a: 1, b: 1, c: 1, d: 1},"
+ "{a: 2, b: 2, c: 2, d: 2},"
+ "{a: 3, b: 3, c: 3, d: 3}]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 2258a818547..aae4d95372f 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -34,10 +34,10 @@
#include <algorithm>
-#include "mongo/db/namespace_string.h"
#include "mongo/db/matcher/expression_parser.h"
-#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
+#include "mongo/db/matcher/extensions_callback_noop.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_test_lib.h"
@@ -404,8 +404,8 @@ std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(const BS
StatusWithMatchExpression status =
MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator);
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
+ << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index 2e6d9c000d2..a52505acc23 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/query/query_planner_test_lib.h"
-#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
@@ -42,6 +41,7 @@
#include "mongo/db/query/query_solution.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
+#include <ostream>
namespace {
diff --git a/src/mongo/db/query/query_planner_test_lib.h b/src/mongo/db/query/query_planner_test_lib.h
index 0a1931d1828..e8ca0e5c360 100644
--- a/src/mongo/db/query/query_planner_test_lib.h
+++ b/src/mongo/db/query/query_planner_test_lib.h
@@ -30,7 +30,6 @@
* This file contains tests for mongo/db/query/query_planner.cpp
*/
-#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
@@ -38,6 +37,7 @@
#include "mongo/db/query/query_solution.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
+#include <ostream>
namespace mongo {
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index 884b8d1dac9..5050653292a 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -51,7 +51,8 @@ using namespace mongo;
TEST_F(QueryPlannerTest, SimpleText) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah'}}"));
assertNumSolutions(1);
@@ -63,7 +64,8 @@ TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a:1}"));
// No table scans allowed so there is no solution.
@@ -76,7 +78,8 @@ TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -95,7 +98,8 @@ TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
@@ -108,7 +112,8 @@ TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
// Both points.
runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
@@ -133,7 +138,10 @@ TEST_F(QueryPlannerTest, SuffixOptional) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -148,7 +156,10 @@ TEST_F(QueryPlannerTest, RemoveFromSubtree) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -164,7 +175,8 @@ TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1),
+ << "_ftsx"
+ << 1),
true);
// Both points.
@@ -177,7 +189,10 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
// 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
// index to satisfy it w/o the text query.
@@ -188,7 +203,10 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -198,7 +216,8 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
assertNumSolutions(1U);
@@ -211,7 +230,8 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPr
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -225,7 +245,8 @@ TEST_F(QueryPlannerTest, TextInsideOrBasic) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -241,10 +262,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$and: [{$or: [{a: 3}, {a: 4}]}, "
- "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
+ "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -260,10 +282,11 @@ TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -281,10 +304,11 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
addIndex(BSON("b" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{a: 1, $or: [{a:2}, {b:2}, "
- "{a: 1, $text: {$search: 'foo'}}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{a: 1, $or: [{a:2}, {b:2}, "
+ "{a: 1, $text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -300,12 +324,13 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
- "{a: 6}]}], "
- "a: 5}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
+ "{a: 6}]}], "
+ "a: 5}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -323,7 +348,8 @@ TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(0);
@@ -336,10 +362,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$and: [{$or: [{a: 1}, {b: 1}]}, "
- "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
+ "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -351,10 +378,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
// Mandatory text index is used, and geo predicate becomes a filter.
assertNumSolutions(1U);
@@ -365,7 +393,8 @@ TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
TEST_F(QueryPlannerTest, OrTextExact) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
@@ -380,7 +409,8 @@ TEST_F(QueryPlannerTest, OrTextExact) {
TEST_F(QueryPlannerTest, OrTextInexactCovered) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
@@ -395,7 +425,8 @@ TEST_F(QueryPlannerTest, OrTextInexactCovered) {
TEST_F(QueryPlannerTest, TextCaseSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
assertNumSolutions(1);
@@ -405,7 +436,8 @@ TEST_F(QueryPlannerTest, TextCaseSensitive) {
TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah', $diacriticSensitive: true}}"));
assertNumSolutions(1);
@@ -415,7 +447,8 @@ TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
TEST_F(QueryPlannerTest, SortKeyMetaProjectionWithTextScoreMetaSort) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuerySortProj(fromjson("{$text: {$search: 'foo'}}"),
fromjson("{a: {$meta: 'textScore'}}"),
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index f64e5d263dc..0a63f8a10e0 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -30,9 +30,9 @@
#include <memory>
+#include "mongo/db/fts/fts_query.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
-#include "mongo/db/fts/fts_query.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index ac1b6abbac1..e5d01e4a091 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -259,8 +259,10 @@ std::unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query,
Status status = ParsedProjection::make(
projObj, queryMatchExpr.getValue().get(), &out, ExtensionsCallbackDisallowExtensions());
if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj << " (query: "
+ << query
+ << "): "
+ << status.toString());
}
ASSERT(out);
return std::unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 74e5c58d4e7..2701dc420ec 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -32,6 +32,8 @@
#include "mongo/db/query/stage_builder.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/and_hash.h"
#include "mongo/db/exec/and_sorted.h"
@@ -48,14 +50,12 @@
#include "mongo/db/exec/or.h"
#include "mongo/db/exec/projection.h"
#include "mongo/db/exec/shard_filter.h"
+#include "mongo/db/exec/skip.h"
#include "mongo/db/exec/sort.h"
#include "mongo/db/exec/sort_key_generator.h"
-#include "mongo/db/exec/skip.h"
#include "mongo/db/exec/text.h"
#include "mongo/db/index/fts_access_method.h"
#include "mongo/db/matcher/extensions_callback_real.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index 0032fc5b996..09682ada033 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include <vector>
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 8d05a926991..666c8ee45a9 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -36,8 +36,8 @@
#include <memory>
#include "mongo/db/client.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/util/concurrency/synchronization.h"
#include "mongo/util/exit.h"
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 9039f488f95..8e726434f7c 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -36,10 +36,10 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bson_validate.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database_catalog_entry.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_create.h"
@@ -77,7 +77,10 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
return Status(
ErrorCodes::CannotCreateIndex,
str::stream()
- << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
+ << "Cannot rebuild index "
+ << spec
+ << ": "
+ << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
}
}
@@ -194,8 +197,8 @@ Status repairDatabase(OperationContext* txn,
if (engine->isMmapV1()) {
// MMAPv1 is a layering violation so it implements its own repairDatabase.
- return static_cast<MMAPV1Engine*>(engine)
- ->repairDatabase(txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
+ return static_cast<MMAPV1Engine*>(engine)->repairDatabase(
+ txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
}
// These are MMAPv1 specific
diff --git a/src/mongo/db/repl/applier_test.cpp b/src/mongo/db/repl/applier_test.cpp
index 2b49a92dc19..8ef0bf4db30 100644
--- a/src/mongo/db/repl/applier_test.cpp
+++ b/src/mongo/db/repl/applier_test.cpp
@@ -304,13 +304,16 @@ TEST_F(ApplierTest, ApplyOperationSuccessful) {
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> result = getDetectableErrorStatus();
@@ -352,13 +355,16 @@ void ApplierTest::_testApplyOperationFailed(size_t opIndex, stdx::function<Statu
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> result = getDetectableErrorStatus();
@@ -403,12 +409,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnFirstOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnFirstOperation) {
- _testApplyOperationFailed(0U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(0U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
@@ -416,12 +421,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnSecondOperation) {
- _testApplyOperationFailed(1U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(1U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
@@ -429,12 +433,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnLastOperation) {
- _testApplyOperationFailed(2U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(2U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
class ApplyUntilAndPauseTest : public ApplierTest {};
@@ -454,8 +457,8 @@ TEST_F(ApplyUntilAndPauseTest, NoOperationsInRange) {
auto result = applyUntilAndPause(
&getReplExecutor(),
{
- OplogEntry(BSON("ts" << Timestamp(Seconds(456), 0))),
- OplogEntry(BSON("ts" << Timestamp(Seconds(789), 0))),
+ OplogEntry(BSON("ts" << Timestamp(Seconds(456), 0))),
+ OplogEntry(BSON("ts" << Timestamp(Seconds(789), 0))),
},
[](OperationContext* txn, const OplogEntry& operation) { return Status::OK(); },
Timestamp(Seconds(123), 0),
@@ -594,13 +597,16 @@ void _testApplyUntilAndPauseDiscardOperations(ReplicationExecutor* executor,
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> completionResult = ApplyUntilAndPauseTest::getDetectableErrorStatus();
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index ea57b00133a..90c845c62f0 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -32,8 +32,8 @@
#include <memory>
-#include "mongo/stdx/thread.h"
#include "mongo/db/jsobj.h"
+#include "mongo/stdx/thread.h"
namespace mongo {
namespace repl {
@@ -45,7 +45,8 @@ const HostAndPort BaseClonerTest::target("localhost", -1);
const NamespaceString BaseClonerTest::nss("db.coll");
const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns" << nss.ns());
+ << "ns"
+ << nss.ns());
// static
BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index cab5c517916..1451adb4960 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -38,8 +38,8 @@
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index c5a5b08a0e7..fb6e689a019 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -304,8 +304,8 @@ void BackgroundSync::_produce(
log() << "Our newest OpTime : " << lastOpTimeFetched;
log() << "Earliest OpTime available is " << syncSourceResp.earliestOpTimeSeen;
log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
- StorageInterface::get(txn)
- ->setMinValid(txn, {lastOpTimeFetched, syncSourceResp.earliestOpTimeSeen});
+ StorageInterface::get(txn)->setMinValid(
+ txn, {lastOpTimeFetched, syncSourceResp.earliestOpTimeSeen});
auto status = _replCoord->setMaintenanceMode(true);
if (!status.isOK()) {
warning() << "Failed to transition into maintenance mode.";
@@ -439,10 +439,11 @@ void BackgroundSync::_produce(
if (!boundaries.start.isNull() || boundaries.end > lastApplied) {
fassertNoTrace(18750,
Status(ErrorCodes::UnrecoverableRollbackError,
- str::stream()
- << "need to rollback, but in inconsistent state. "
- << "minvalid: " << boundaries.end.toString()
- << " > our last optime: " << lastApplied.toString()));
+ str::stream() << "need to rollback, but in inconsistent state. "
+ << "minvalid: "
+ << boundaries.end.toString()
+ << " > our last optime: "
+ << lastApplied.toString()));
}
_rollback(txn, source, getConnection);
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index b8da58e4372..86527def28a 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -46,18 +46,18 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
- << #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
+ << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
- << #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
+ << s_.reason(); \
} while (false)
namespace mongo {
@@ -155,7 +155,9 @@ ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
startQuorumCheck(config, 0);
@@ -166,7 +168,9 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
_executor->shutdown();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
startQuorumCheck(config, 0);
@@ -177,18 +181,21 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
// In this test, "we" are host "h3:1". All other nodes time out on
// their heartbeat request, and so the quorum check for initiate
// will fail because some members were unavailable.
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
startQuorumCheck(config, 2);
_net->enterNetwork();
const Date_t startDate = _net->now();
@@ -231,15 +238,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -274,18 +285,25 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1"
- << "priority" << 0 << "votes" << 0)
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
<< BSON("_id" << 3 << "host"
- << "h3:1") << BSON("_id" << 4 << "host"
- << "h4:1")
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
<< BSON("_id" << 5 << "host"
- << "h5:1") << BSON("_id" << 6 << "host"
- << "h6:1"))));
+ << "h5:1")
+ << BSON("_id" << 6 << "host"
+ << "h6:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -334,15 +352,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -392,16 +414,21 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))
- << "settings" << BSON("replicaSetId" << replicaSetId)));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))
+ << "settings"
+ << BSON("replicaSetId" << replicaSetId)));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -449,8 +476,10 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
ASSERT_REASON_CONTAINS(status,
str::stream() << "Our replica set ID of " << replicaSetId
- << " did not match that of " << incompatibleHost.toString()
- << ", which is " << unexpectedId);
+ << " did not match that of "
+ << incompatibleHost.toString()
+ << ", which is "
+ << unexpectedId);
ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
@@ -466,15 +495,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -495,7 +528,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 1),
+ << "v"
+ << 1),
BSONObj(),
Milliseconds(8))));
} else {
@@ -527,15 +561,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -556,7 +594,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 1),
+ << "v"
+ << 1),
BSONObj(),
Milliseconds(8))));
} else {
@@ -583,15 +622,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToNodeWithData) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -637,12 +680,15 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -663,7 +709,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 5),
+ << "v"
+ << 5),
BSONObj(),
Milliseconds(8))));
} else {
@@ -688,12 +735,15 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -740,18 +790,27 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes" << 0 << "priority" << 0))));
+ << "votes"
+ << 0
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -797,18 +856,23 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -850,18 +914,27 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes" << 0 << "priority" << 0))));
+ << "votes"
+ << 0
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 95c2fd66baf..ceae2886956 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -107,8 +107,8 @@ std::string CollectionCloner::getDiagnosticString() const {
output << " active: " << _active;
output << " listIndexes fetcher: " << _listIndexesFetcher.getDiagnosticString();
output << " find fetcher: " << _findFetcher.getDiagnosticString();
- output << " database worked callback handle: " << (_dbWorkCallbackHandle.isValid() ? "valid"
- : "invalid");
+ output << " database worked callback handle: "
+ << (_dbWorkCallbackHandle.isValid() ? "valid" : "invalid");
return output;
}
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index cf69d7f44ef..a7d9000bbfb 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -40,8 +40,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/base_cloner.h"
#include "mongo/db/repl/replication_executor.h"
-#include "mongo/stdx/functional.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/functional.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 5ac2c71c992..c0320dc16b6 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -140,7 +140,8 @@ TEST_F(CollectionClonerTest, RemoteCollectionMissing) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::NamespaceNotFound));
+ << "code"
+ << ErrorCodes::NamespaceNotFound));
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getStatus().code());
ASSERT_FALSE(collectionCloner->isActive());
@@ -238,10 +239,12 @@ TEST_F(CollectionClonerTest, BeginCollection) {
const std::vector<BSONObj> specs = {idIndexSpec,
BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
<< "a_1"
- << "ns" << nss.ns()),
+ << "ns"
+ << nss.ns()),
BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
<< "b_1"
- << "ns" << nss.ns())};
+ << "ns"
+ << nss.ns())};
processNetworkResponse(createListIndexesResponse(1, BSON_ARRAY(specs[0] << specs[1])));
@@ -329,7 +332,8 @@ TEST_F(CollectionClonerTest, FindCommandFailed) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::CursorNotFound));
+ << "code"
+ << ErrorCodes::CursorNotFound));
ASSERT_EQUALS(ErrorCodes::CursorNotFound, getStatus().code());
ASSERT_FALSE(collectionCloner->isActive());
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index f09013af383..ff99eb594b2 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -775,8 +775,8 @@ void DataReplicator::_onDataClonerFinish(const Status& status) {
return;
}
- BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
- << "limit" << 1);
+ BSONObj query = BSON(
+ "find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
_tmpFetcher = stdx::make_unique<Fetcher>(
diff --git a/src/mongo/db/repl/data_replicator.h b/src/mongo/db/repl/data_replicator.h
index e1c491ebd4d..bde976acd9c 100644
--- a/src/mongo/db/repl/data_replicator.h
+++ b/src/mongo/db/repl/data_replicator.h
@@ -37,10 +37,10 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/multiapplier.h"
#include "mongo/db/repl/collection_cloner.h"
-#include "mongo/db/repl/database_cloner.h"
#include "mongo/db/repl/data_replicator_external_state.h"
+#include "mongo/db/repl/database_cloner.h"
+#include "mongo/db/repl/multiapplier.h"
#include "mongo/db/repl/oplog_fetcher.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_executor.h"
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 300100d4726..43e42f7cc5e 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -40,18 +40,18 @@
#include "mongo/db/repl/data_replicator_external_state_mock.h"
#include "mongo/db/repl/member_state.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/db/repl/update_position_args.h"
-#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/db/repl/replication_executor.h"
+#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/db/repl/reporter.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/storage_interface_mock.h"
-#include "mongo/db/repl/sync_source_selector.h"
#include "mongo/db/repl/sync_source_resolver.h"
+#include "mongo/db/repl/sync_source_selector.h"
+#include "mongo/db/repl/update_position_args.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/util/fail_point_service.h"
#include "mongo/util/concurrency/thread_name.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -105,8 +105,9 @@ public:
* clear/reset state
*/
void reset() {
- _rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&)
- -> Status { return Status::OK(); };
+ _rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&) -> Status {
+ return Status::OK();
+ };
_setMyLastOptime = [this](const OpTime& opTime) { _myLastOpTime = opTime; };
_myLastOpTime = OpTime();
_memberState = MemberState::RS_UNKNOWN;
@@ -198,7 +199,7 @@ protected:
options.prepareReplSetUpdatePositionCommandFn =
[](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return BSON(UpdatePositionArgs::kCommandFieldName << 1); };
+ -> StatusWith<BSONObj> { return BSON(UpdatePositionArgs::kCommandFieldName << 1); };
options.getMyLastOptime = [this]() { return _myLastOpTime; };
options.setMyLastOptime = [this](const OpTime& opTime) { _setMyLastOptime(opTime); };
options.setFollowerMode = [this](const MemberState& state) {
@@ -209,13 +210,17 @@ protected:
options.syncSourceSelector = this;
options.getReplSetConfig = []() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "myset"
- << "version" << 1 << "protocolVersion" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("electionTimeoutMillis" << 10000))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "myset"
+ << "version"
+ << 1
+ << "protocolVersion"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("electionTimeoutMillis" << 10000))));
return config;
};
@@ -333,10 +338,9 @@ protected:
_storage.beginCollectionFn = _beginCollectionFn;
_storage.insertDocumentsFn = _insertCollectionFn;
- _storage.insertMissingDocFn =
- [&](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
- return Status::OK();
- };
+ _storage.insertMissingDocFn = [&](OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& doc) { return Status::OK(); };
dr->_setInitialSyncStorageInterface(&_storage);
_isbr.reset(new InitialSyncBackgroundRunner(dr));
@@ -366,11 +370,15 @@ protected:
const long long cursorId = cmdElem.numberLong();
if (isGetMore && cursorId == 1LL) {
// process getmore requests from the oplog fetcher
- auto respBSON = fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs'"
- " , nextBatch:[{ts:Timestamp(" << ++c
- << ",1), h:1, ns:'test.a', v:" << OplogEntry::kOplogVersion
- << ", op:'u', o2:{_id:" << c << "}, o:{$set:{a:1}}}]}}");
+ auto respBSON =
+ fromjson(str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs'"
+ " , nextBatch:[{ts:Timestamp("
+ << ++c
+ << ",1), h:1, ns:'test.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'u', o2:{_id:"
+ << c
+ << "}, o:{$set:{a:1}}}]}}");
net->scheduleResponse(
noi,
net->now(),
@@ -446,47 +454,50 @@ TEST_F(InitialSyncTest, Complete) {
*
*/
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- };
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ };
// Initial sync flag should not be set before starting.
ASSERT_FALSE(StorageInterface::get(getGlobalServiceContext())
@@ -516,58 +527,61 @@ TEST_F(InitialSyncTest, Complete) {
TEST_F(InitialSyncTest, MissingDocOnMultiApplyCompletes) {
DataReplicatorOptions opts;
int applyCounter{0};
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- if (++applyCounter == 1) {
- return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
- }
- return ops.back().getOpTime();
- };
-
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'u', o2:{_id:1}, o:{$set:{a:1}}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a -- empty
- fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:[]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // missing doc fetch -- find:a {_id:1}
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ if (++applyCounter == 1) {
+ return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
+ }
+ return ops.back().getOpTime();
};
+
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'u', o2:{_id:1}, o:{$set:{a:1}}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a -- empty
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:[]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // missing doc fetch -- find:a {_id:1}
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ };
startSync();
setResponses(responses);
playResponses(true);
@@ -581,7 +595,9 @@ TEST_F(InitialSyncTest, Failpoint) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -611,12 +627,14 @@ TEST_F(InitialSyncTest, FailsOnClone) {
// get latest oplog ts
fromjson(
str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
<< ", op:'i', o:{_id:1, a:1}}]}}"),
// oplog fetcher find
fromjson(
str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
<< ", op:'i', o:{_id:1, a:1}}]}}"),
// Clone Start
// listDatabases
@@ -631,47 +649,50 @@ TEST_F(InitialSyncTest, FailsOnClone) {
}
TEST_F(InitialSyncTest, FailOnRollback) {
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:2}"),
- };
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:2}"),
+ };
startSync();
setResponses({responses});
@@ -984,26 +1005,30 @@ TEST_F(SteadyStateTest, RollbackTwoSyncSourcesSecondRollbackSucceeds) {
TEST_F(SteadyStateTest, PauseDataReplicator) {
auto lastOperationApplied = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(123), 0));
auto operationToApply = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(456), 0));
stdx::mutex mutex;
unittest::Barrier barrier(2U);
Timestamp lastTimestampApplied;
BSONObj operationApplied;
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationApplied = ops.back().raw;
- barrier.countDownAndWait();
- return ops.back().getOpTime();
- };
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ operationApplied = ops.back().raw;
+ barrier.countDownAndWait();
+ return ops.back().getOpTime();
+ };
DataReplicatorOptions::SetMyLastOptimeFn oldSetMyLastOptime = _setMyLastOptime;
_setMyLastOptime = [&](const OpTime& opTime) {
oldSetMyLastOptime(opTime);
@@ -1076,26 +1101,30 @@ TEST_F(SteadyStateTest, PauseDataReplicator) {
TEST_F(SteadyStateTest, ApplyOneOperation) {
auto lastOperationApplied = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(123), 0));
auto operationToApply = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(456), 0));
stdx::mutex mutex;
unittest::Barrier barrier(2U);
Timestamp lastTimestampApplied;
BSONObj operationApplied;
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationApplied = ops.back().raw;
- barrier.countDownAndWait();
- return ops.back().getOpTime();
- };
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ operationApplied = ops.back().raw;
+ barrier.countDownAndWait();
+ return ops.back().getOpTime();
+ };
DataReplicatorOptions::SetMyLastOptimeFn oldSetMyLastOptime = _setMyLastOptime;
_setMyLastOptime = [&](const OpTime& opTime) {
oldSetMyLastOptime(opTime);
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index da4d3c33887..37e53a97776 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -219,16 +219,17 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
for (auto&& info : _collectionInfos) {
BSONElement nameElement = info.getField(kNameFieldName);
if (nameElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '"
- << kNameFieldName << "' "
- << "field : " << info));
+ _finishCallback(
+ Status(ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '" << kNameFieldName << "' "
+ << "field : "
+ << info));
return;
}
if (nameElement.type() != mongo::String) {
- _finishCallback(Status(ErrorCodes::TypeMismatch,
- str::stream() << "'" << kNameFieldName
- << "' field must be a string: " << info));
+ _finishCallback(Status(
+ ErrorCodes::TypeMismatch,
+ str::stream() << "'" << kNameFieldName << "' field must be a string: " << info));
return;
}
const std::string collectionName = nameElement.String();
@@ -236,22 +237,27 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
_finishCallback(Status(ErrorCodes::DuplicateKey,
str::stream()
<< "collection info contains duplicate collection name "
- << "'" << collectionName << "': " << info));
+ << "'"
+ << collectionName
+ << "': "
+ << info));
return;
}
BSONElement optionsElement = info.getField(kOptionsFieldName);
if (optionsElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '"
- << kOptionsFieldName << "' "
- << "field : " << info));
+ _finishCallback(Status(
+ ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '" << kOptionsFieldName << "' "
+ << "field : "
+ << info));
return;
}
if (!optionsElement.isABSONObj()) {
_finishCallback(Status(ErrorCodes::TypeMismatch,
str::stream() << "'" << kOptionsFieldName
- << "' field must be an object: " << info));
+ << "' field must be an object: "
+ << info));
return;
}
const BSONObj optionsObj = optionsElement.Obj();
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index 954f816cdaa..79dcf1529e2 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -37,8 +37,8 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/client/fetcher.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/base_cloner.h"
+#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/mutex.h"
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index d5494d80345..78d70018ae3 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -190,7 +190,8 @@ TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "unknown operator"
- << "code" << ErrorCodes::BadValue));
+ << "code"
+ << ErrorCodes::BadValue));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -214,8 +215,9 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnedNoCollections) {
}
TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
- DatabaseCloner::ListCollectionsPredicateFn pred =
- [](const BSONObj& info) { return info["name"].String() != "b"; };
+ DatabaseCloner::ListCollectionsPredicateFn pred = [](const BSONObj& info) {
+ return info["name"].String() != "b";
+ };
databaseCloner.reset(new DatabaseCloner(
&getReplExecutor(),
target,
@@ -232,13 +234,16 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "c"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(createListCollectionsResponse(
0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1] << sourceInfos[2])));
@@ -256,10 +261,12 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
@@ -305,11 +312,11 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameNotAString) {
TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
ASSERT_FALSE(databaseCloner->isActive());
@@ -317,14 +324,15 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj())
- << BSON("name"
- << "a"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj())
+ << BSON("name"
+ << "a"
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::DuplicateKey, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "duplicate collection name 'a'");
ASSERT_FALSE(databaseCloner->isActive());
@@ -345,7 +353,8 @@ TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
processNetworkResponse(createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options" << 123))));
+ << "options"
+ << 123))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "'options' field must be an object");
ASSERT_FALSE(databaseCloner->isActive());
@@ -355,11 +364,11 @@ TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
ASSERT_OK(databaseCloner->start());
processNetworkResponse(
- createListCollectionsResponse(
- 0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSON("storageEngine" << 1)))));
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSON("storageEngine" << 1)))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -380,11 +389,11 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
@@ -397,11 +406,11 @@ TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
databaseCloner->setStartCollectionClonerFn(
[](CollectionCloner& cloner) { return Status(ErrorCodes::OperationFailed, ""); });
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -424,14 +433,15 @@ TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
return cloner.start();
});
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj())
- << BSON("name"
- << "b"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj())
+ << BSON("name"
+ << "b"
+ << "options"
+ << BSONObj()))));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
processNetworkResponse(createCursorResponse(0, BSONArray()));
@@ -452,10 +462,12 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(
createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
@@ -466,7 +478,8 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
// This affects the order of the network responses.
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::NamespaceNotFound));
+ << "code"
+ << ErrorCodes::NamespaceNotFound));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
processNetworkResponse(createCursorResponse(0, BSONArray()));
@@ -497,10 +510,12 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(
createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
diff --git a/src/mongo/db/repl/database_task.h b/src/mongo/db/repl/database_task.h
index 29f10f2902c..bde2df64c09 100644
--- a/src/mongo/db/repl/database_task.h
+++ b/src/mongo/db/repl/database_task.h
@@ -31,8 +31,8 @@
#include <string>
#include "mongo/db/concurrency/lock_manager_defs.h"
-#include "mongo/db/repl/task_runner.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/repl/task_runner.h"
namespace mongo {
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index 1b4a82902c2..c92f931cf34 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -99,9 +99,13 @@ ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
const BSONObj makeElectRequest(const ReplicaSetConfig& rsConfig, int selfIndex) {
const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
return BSON("replSetElect" << 1 << "set" << rsConfig.getReplSetName() << "who"
- << myConfig.getHostAndPort().toString() << "whoid"
- << myConfig.getId() << "cfgver" << rsConfig.getConfigVersion()
- << "round" << 380865962699346850ll);
+ << myConfig.getHostAndPort().toString()
+ << "whoid"
+ << myConfig.getId()
+ << "cfgver"
+ << rsConfig.getConfigVersion()
+ << "round"
+ << 380865962699346850ll);
}
BSONObj stripRound(const BSONObj& orig) {
@@ -158,7 +162,9 @@ TEST_F(ElectCmdRunnerTest, OneNode) {
// Only one node in the config.
const ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
@@ -174,7 +180,9 @@ TEST_F(ElectCmdRunnerTest, TwoNodes) {
const ReplicaSetConfig config =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -210,7 +218,9 @@ TEST_F(ElectCmdRunnerTest, ShuttingDown) {
// Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -307,26 +317,33 @@ protected:
BSONObj threeNodesTwoArbitersConfig() {
return BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
<< "host1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "host2"
- << "arbiterOnly" << true)));
+ << "arbiterOnly"
+ << true)));
}
BSONObj basicThreeNodeConfig() {
return BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")));
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")));
}
private:
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index 45fde9ef816..2d0917254ec 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -121,9 +121,13 @@ const BSONObj makeFreshRequest(const ReplicaSetConfig& rsConfig,
int selfIndex) {
const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
return BSON("replSetFresh" << 1 << "set" << rsConfig.getReplSetName() << "opTime"
- << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL()) << "who"
- << myConfig.getHostAndPort().toString() << "cfgver"
- << rsConfig.getConfigVersion() << "id" << myConfig.getId());
+ << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL())
+ << "who"
+ << myConfig.getHostAndPort().toString()
+ << "cfgver"
+ << rsConfig.getConfigVersion()
+ << "id"
+ << myConfig.getId());
}
// This is necessary because the run method must be scheduled in the Replication Executor
@@ -159,7 +163,9 @@ TEST_F(FreshnessCheckerTest, TwoNodes) {
// Two nodes, we are node h1. We are freshest, but we tie with h2.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -177,16 +183,19 @@ TEST_F(FreshnessCheckerTest, TwoNodes) {
ASSERT_EQUALS("admin", noi->getRequest().dbname);
ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 << "id" << 2 << "set"
- << "rs0"
- << "who"
- << "h1"
- << "cfgver" << 1 << "opTime" << Date_t()),
- BSONObj(),
- Milliseconds(8))));
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver"
+ << 1
+ << "opTime"
+ << Date_t()),
+ BSONObj(),
+ Milliseconds(8))));
}
_net->runUntil(startDate + Milliseconds(10));
_net->exitNetwork();
@@ -199,7 +208,9 @@ TEST_F(FreshnessCheckerTest, ShuttingDown) {
// Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -222,7 +233,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -248,8 +261,12 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "fresher" << true
- << "opTime" << Date_t()),
+ << "cfgver"
+ << 1
+ << "fresher"
+ << true
+ << "opTime"
+ << Date_t()),
BSONObj(),
Milliseconds(8))));
}
@@ -268,7 +285,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -295,7 +314,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "opTime"
+ << "cfgver"
+ << 1
+ << "opTime"
<< Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL())),
BSONObj(),
Milliseconds(8))));
@@ -314,7 +335,9 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -340,7 +363,10 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "opTime" << 3),
+ << "cfgver"
+ << 1
+ << "opTime"
+ << 3),
BSONObj(),
Milliseconds(8))));
}
@@ -353,9 +379,8 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "wrong type for opTime argument in replSetFresh "
- "response: int"));
+ countLogLinesContaining("wrong type for opTime argument in replSetFresh "
+ "response: int"));
}
TEST_F(FreshnessCheckerTest, ElectVetoed) {
@@ -363,7 +388,9 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -390,9 +417,14 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "veto" << true << "errmsg"
+ << "cfgver"
+ << 1
+ << "veto"
+ << true
+ << "errmsg"
<< "I'd rather you didn't"
- << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())),
+ << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())),
BSONObj(),
Milliseconds(8))));
}
@@ -405,9 +437,8 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
}
int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
@@ -419,18 +450,21 @@ int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
// one other responds as fresher than us
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -475,18 +509,21 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes) {
// one other responds with a later optime than ours
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
@@ -545,18 +582,21 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes
TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
// one other responds with "opTime" field of non-Date value, causing not freshest
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -597,26 +637,28 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "wrong type for opTime argument in replSetFresh "
- "response: int"));
+ countLogLinesContaining("wrong type for opTime argument in replSetFresh "
+ "response: int"));
}
TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
// one other responds with veto
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -657,26 +699,28 @@ TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
}
TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
// one other responds with veto and another responds with tie
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
@@ -726,9 +770,8 @@ TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
_net->runUntil(startDate + Milliseconds(10));
ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
ASSERT_EQUALS(0,
- countLogLinesContaining(
- "not electing self, h4:27017 would veto with '"
- "errmsg: \"I'd rather you didn't\"'"));
+ countLogLinesContaining("not electing self, h4:27017 would veto with '"
+ "errmsg: \"I'd rather you didn't\"'"));
_net->runUntil(startDate + Milliseconds(20));
ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
_net->exitNetwork();
@@ -736,24 +779,26 @@ TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h4:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h4:27017 would veto with "
+ "'I'd rather you didn't'"));
}
TEST_F(FreshnessCheckerTest, ElectManyNodesNotAllRespond) {
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -806,12 +851,15 @@ public:
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2"))));
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/freshness_scanner.cpp b/src/mongo/db/repl/freshness_scanner.cpp
index ed4f160ee8f..fcba850e021 100644
--- a/src/mongo/db/repl/freshness_scanner.cpp
+++ b/src/mongo/db/repl/freshness_scanner.cpp
@@ -86,8 +86,9 @@ void FreshnessScanner::Algorithm::processResponse(const RemoteCommandRequest& re
int index = _rsConfig.findMemberIndexByHostAndPort(request.target);
FreshnessInfo freshnessInfo{index, lastOpTime};
- auto cmp =
- [](const FreshnessInfo& a, const FreshnessInfo& b) { return a.opTime > b.opTime; };
+ auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
+ return a.opTime > b.opTime;
+ };
auto iter =
std::upper_bound(_freshnessInfos.begin(), _freshnessInfos.end(), freshnessInfo, cmp);
_freshnessInfos.insert(iter, freshnessInfo);
diff --git a/src/mongo/db/repl/freshness_scanner_test.cpp b/src/mongo/db/repl/freshness_scanner_test.cpp
index 5096b4ce9f2..53314298b5b 100644
--- a/src/mongo/db/repl/freshness_scanner_test.cpp
+++ b/src/mongo/db/repl/freshness_scanner_test.cpp
@@ -58,22 +58,29 @@ public:
}
virtual void setUp() {
- ASSERT_OK(
- _config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(_config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(_config.validate());
_net = new NetworkInterfaceMock;
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index efd8bd5466e..78fe98e45de 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -219,7 +219,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Found \"" << kIsReplicaSetFieldName
<< "\" field which should indicate that no valid config "
"is loaded, but we didn't also have an \""
- << kInfoFieldName << "\" field as we expected");
+ << kInfoFieldName
+ << "\" field as we expected");
}
}
@@ -246,7 +247,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kHostsFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(hostElement.type()));
}
_hosts.push_back(HostAndPort(hostElement.String()));
@@ -266,7 +268,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kPassivesFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(passiveElement.type()));
}
_passives.push_back(HostAndPort(passiveElement.String()));
@@ -286,7 +289,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kArbitersFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(arbiterElement.type()));
}
_arbiters.push_back(HostAndPort(arbiterElement.String()));
@@ -359,7 +363,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kTagsFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(tagsElement.type()));
}
_tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
@@ -391,7 +396,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object) << " but found type "
+ << typeName(Object)
+ << " but found type "
<< typeName(lastWriteOpTimeElement.type()));
}
auto lastWriteOpTime = OpTime::parseFromOplogEntry(lastWriteOpTimeElement.Obj());
@@ -411,7 +417,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date) << " but found type "
+ << typeName(Date)
+ << " but found type "
<< typeName(lastWriteDateElement.type()));
}
if (_lastWrite) {
@@ -431,7 +438,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object) << " but found type "
+ << typeName(Object)
+ << " but found type "
<< typeName(lastMajorityWriteOpTimeElement.type()));
}
auto lastMajorityWriteOpTime =
@@ -452,7 +460,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date) << " but found type "
+ << typeName(Date)
+ << " but found type "
<< typeName(lastMajorityWriteDateElement.type()));
}
if (_lastMajorityWrite) {
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index 87cba6fe03b..50be2827f35 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -37,15 +37,15 @@
#include "mongo/base/init.h"
#include "mongo/bson/util/builder.h"
#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/util/scopeguard.h"
+#include "mongo/db/commands.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun) || \
defined(__OpenBSD__)
@@ -66,11 +66,11 @@
#endif
#elif defined(_WIN32)
+#include <Ws2tcpip.h>
#include <boost/asio/detail/socket_ops.hpp>
#include <boost/system/error_code.hpp>
#include <iphlpapi.h>
#include <winsock2.h>
-#include <Ws2tcpip.h>
#endif // defined(_WIN32)
namespace mongo {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index a368b4b91c6..d7695947687 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -975,8 +975,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
<< ((nextOpTime < syncedTo) ? "<??" : ">") << " syncedTo "
<< syncedTo.toStringLong() << '\n'
<< "time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n"
- << "tailing: " << tailing << '\n' << "data too stale, halting replication"
- << endl;
+ << "tailing: " << tailing << '\n'
+ << "data too stale, halting replication" << endl;
replInfo = replAllDead = "data too stale halted replication";
verify(syncedTo < nextOpTime);
throw SyncException();
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index b051451524c..eda31743ba7 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -62,7 +62,8 @@ TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
ASSERT_EQUALS(ErrorCodes::BadValue,
mc.initialize(BSON("_id" << 0 << "host"
<< "localhost"
- << "frim" << 1),
+ << "frim"
+ << 1),
&tagConfig));
}
@@ -121,12 +122,14 @@ TEST(MemberConfig, ParseArbiterOnly) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly" << 1.0),
+ << "arbiterOnly"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isArbiter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly" << false),
+ << "arbiterOnly"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.isArbiter());
}
@@ -136,12 +139,14 @@ TEST(MemberConfig, ParseHidden) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "hidden" << 1.0),
+ << "hidden"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isHidden());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "hidden" << false),
+ << "hidden"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.isHidden());
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
@@ -157,12 +162,14 @@ TEST(MemberConfig, ParseBuildIndexes) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes" << 1.0),
+ << "buildIndexes"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.shouldBuildIndexes());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes" << false),
+ << "buildIndexes"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.shouldBuildIndexes());
}
@@ -172,40 +179,49 @@ TEST(MemberConfig, ParseVotes) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.0),
+ << "votes"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "priority" << 0),
+ << "votes"
+ << 0
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.5),
+ << "votes"
+ << 1.5),
&tagConfig));
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0.5),
+ << "votes"
+ << 0.5),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -0.5),
+ << "votes"
+ << -0.5),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 2),
+ << "votes"
+ << 2),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << Date_t::fromMillisSinceEpoch(2)),
+ << "votes"
+ << Date_t::fromMillisSinceEpoch(2)),
&tagConfig));
}
@@ -214,24 +230,28 @@ TEST(MemberConfig, ParsePriority) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1),
+ << "priority"
+ << 1),
&tagConfig));
ASSERT_EQUALS(1.0, mc.getPriority());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0),
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_EQUALS(0.0, mc.getPriority());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 100.8),
+ << "priority"
+ << 100.8),
&tagConfig));
ASSERT_EQUALS(100.8, mc.getPriority());
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << Date_t::fromMillisSinceEpoch(2)),
+ << "priority"
+ << Date_t::fromMillisSinceEpoch(2)),
&tagConfig));
}
@@ -240,7 +260,8 @@ TEST(MemberConfig, ParseSlaveDelay) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "slaveDelay" << 100),
+ << "slaveDelay"
+ << 100),
&tagConfig));
ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
}
@@ -250,10 +271,11 @@ TEST(MemberConfig, ParseTags) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "tags" << BSON("k1"
- << "v1"
- << "k2"
- << "v2")),
+ << "tags"
+ << BSON("k1"
+ << "v1"
+ << "k2"
+ << "v2")),
&tagConfig));
ASSERT_EQUALS(5U, mc.getNumTags());
ASSERT_EQUALS(5, std::distance(mc.tagsBegin(), mc.tagsEnd()));
@@ -284,14 +306,18 @@ TEST(MemberConfig, ValidateVotes) {
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.0),
+ << "votes"
+ << 1.0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "priority" << 0),
+ << "votes"
+ << 0
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -299,21 +325,28 @@ TEST(MemberConfig, ValidateVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.5),
+ << "votes"
+ << 1.5),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0.5 << "priority" << 0),
+ << "votes"
+ << 0.5
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -0.5 << "priority" << 0),
+ << "votes"
+ << -0.5
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -321,13 +354,15 @@ TEST(MemberConfig, ValidateVotes) {
// Invalid values
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 2),
+ << "votes"
+ << 2),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -1),
+ << "votes"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -337,22 +372,26 @@ TEST(MemberConfig, ValidatePriorityRanges) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0),
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1000),
+ << "priority"
+ << 1000),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << -1),
+ << "priority"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1001),
+ << "priority"
+ << 1001),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -362,22 +401,34 @@ TEST(MemberConfig, ValidateSlaveDelays) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 0),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 3600 * 10),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3600 * 10),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << -1),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3600 * 24 * 400),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -387,7 +438,10 @@ TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "slaveDelay" << 60),
+ << "priority"
+ << 1
+ << "slaveDelay"
+ << 60),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -397,12 +451,18 @@ TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "hidden" << true),
+ << "priority"
+ << 1
+ << "hidden"
+ << true),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "hidden" << false),
+ << "priority"
+ << 1
+ << "hidden"
+ << false),
&tagConfig));
ASSERT_OK(mc.validate());
}
@@ -412,13 +472,19 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "buildIndexes" << false),
+ << "priority"
+ << 1
+ << "buildIndexes"
+ << false),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "buildIndexes" << true),
+ << "priority"
+ << 1
+ << "buildIndexes"
+ << true),
&tagConfig));
ASSERT_OK(mc.validate());
}
@@ -428,25 +494,38 @@ TEST(MemberConfig, ValidateArbiterVotesRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1 << "arbiterOnly" << true),
+ << "votes"
+ << 1
+ << "arbiterOnly"
+ << true),
&tagConfig));
ASSERT_OK(mc.validate());
- ASSERT_OK(
- mc.initialize(BSON("_id" << 0 << "host"
- << "h"
- << "votes" << 0 << "priority" << 0 << "arbiterOnly" << false),
- &tagConfig));
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes"
+ << 0
+ << "priority"
+ << 0
+ << "arbiterOnly"
+ << false),
+ &tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1 << "arbiterOnly" << false),
+ << "votes"
+ << 1
+ << "arbiterOnly"
+ << false),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "arbiterOnly" << true),
+ << "votes"
+ << 0
+ << "arbiterOnly"
+ << true),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
diff --git a/src/mongo/db/repl/old_update_position_args.cpp b/src/mongo/db/repl/old_update_position_args.cpp
index 67575d7e280..92ffd74db79 100644
--- a/src/mongo/db/repl/old_update_position_args.cpp
+++ b/src/mongo/db/repl/old_update_position_args.cpp
@@ -141,8 +141,10 @@ BSONObj OldUpdatePositionArgs::toBSON() const {
++update) {
updateArray.append(BSON(kMemberRIDFieldName << update->rid << kOpTimeFieldName
<< update->ts.getTimestamp()
- << kConfigVersionFieldName << update->cfgver
- << kMemberIdFieldName << update->memberId));
+ << kConfigVersionFieldName
+ << update->cfgver
+ << kMemberIdFieldName
+ << update->memberId));
}
updateArray.doneFast();
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 0d66d00aad6..2a9e7e0b40c 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -431,7 +431,7 @@ void logOps(OperationContext* txn,
txn, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash));
}
- std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const* [count]);
+ std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]);
for (size_t i = 0; i < count; i++) {
basePtrs[i] = &writers[i];
}
@@ -605,72 +605,73 @@ struct ApplyOpMetadata {
std::map<std::string, ApplyOpMetadata> opsMap = {
{"create",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return createCollection(txn, NamespaceString(ns).db().toString(), cmd); },
- {ErrorCodes::NamespaceExists}}},
- {"collMod",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return createCollection(txn, NamespaceString(ns).db().toString(), cmd);
+ },
+ {ErrorCodes::NamespaceExists}}},
+ {"collMod", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
}}},
{"dropDatabase",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return dropDatabase(txn, NamespaceString(ns).db().toString()); },
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return dropDatabase(txn, NamespaceString(ns).db().toString());
+ },
{ErrorCodes::NamespaceNotFound}}},
{"drop",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
+ },
// IllegalOperation is necessary because in 3.0 we replicate drops of system.profile
// TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility
{ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}}},
// deleteIndex(es) is deprecated but still works as of April 10, 2015
{"deleteIndex",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"deleteIndexes",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndex",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndexes",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"renameCollection",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return renameCollection(txn,
- NamespaceString(cmd.firstElement().valuestrsafe()),
- NamespaceString(cmd["to"].valuestrsafe()),
- cmd["dropTarget"].trueValue(),
- cmd["stayTemp"].trueValue());
- },
+ return renameCollection(txn,
+ NamespaceString(cmd.firstElement().valuestrsafe()),
+ NamespaceString(cmd["to"].valuestrsafe()),
+ cmd["dropTarget"].trueValue(),
+ cmd["stayTemp"].trueValue());
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}},
{"applyOps",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::UnknownError}}},
- {"convertToCapped",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number()); }}},
- {"emptycapped",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return emptyCapped(txn, parseNs(ns, cmd)); }}},
+ {"convertToCapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number());
+ }}},
+ {"emptycapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return emptyCapped(txn, parseNs(ns, cmd));
+ }}},
};
} // namespace
@@ -742,7 +743,9 @@ Status applyOperation_inlock(OperationContext* txn,
indexNss.isValid());
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Database name mismatch for database ("
- << nsToDatabaseSubstring(ns) << ") while creating index: " << op,
+ << nsToDatabaseSubstring(ns)
+ << ") while creating index: "
+ << op,
nsToDatabaseSubstring(ns) == indexNss.db());
opCounters->gotInsert();
@@ -773,10 +776,10 @@ Status applyOperation_inlock(OperationContext* txn,
}
return Status::OK();
}
- uassert(
- ErrorCodes::NamespaceNotFound,
- str::stream() << "Failed to apply insert due to missing collection: " << op.toString(),
- collection);
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Failed to apply insert due to missing collection: "
+ << op.toString(),
+ collection);
if (fieldO.type() == Array) {
// Batched inserts.
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index d009083c709..c04fdd85579 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -32,8 +32,8 @@
#include <deque>
#include <string>
-#include "mongo/base/status.h"
#include "mongo/base/disallow_copying.h"
+#include "mongo/base/status.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/stdx/functional.h"
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index ec02d16ce1a..f43e53817f7 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -127,17 +127,26 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents, OpTimeWithHash
if (!opTimeResult.isOK()) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "our last op time fetched: " << lastFetched.opTime.toString()
- << " (hash: " << lastFetched.value << ")"
+ << " (hash: "
+ << lastFetched.value
+ << ")"
<< ". failed to parse optime from first oplog on source: "
- << o.toString() << ": " << opTimeResult.getStatus().toString());
+ << o.toString()
+ << ": "
+ << opTimeResult.getStatus().toString());
}
auto opTime = opTimeResult.getValue();
long long hash = o["h"].numberLong();
if (opTime != lastFetched.opTime || hash != lastFetched.value) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "our last op time fetched: " << lastFetched.opTime.toString()
- << ". source's GTE: " << opTime.toString() << " hashes: ("
- << lastFetched.value << "/" << hash << ")");
+ << ". source's GTE: "
+ << opTime.toString()
+ << " hashes: ("
+ << lastFetched.value
+ << "/"
+ << hash
+ << ")");
}
return Status::OK();
}
@@ -149,7 +158,8 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (first && documents.empty()) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "The first batch of oplog entries is empty, but expected at "
- "least 1 document matching ts: " << lastTS.toString());
+ "least 1 document matching ts: "
+ << lastTS.toString());
}
DocumentsInfo info;
@@ -178,8 +188,11 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (lastTS >= docTS) {
return Status(ErrorCodes::OplogOutOfOrder,
str::stream() << "Out of order entries in oplog. lastTS: "
- << lastTS.toString() << " outOfOrderTS:" << docTS.toString()
- << " at count:" << info.networkDocumentCount);
+ << lastTS.toString()
+ << " outOfOrderTS:"
+ << docTS.toString()
+ << " at count:"
+ << info.networkDocumentCount);
}
lastTS = docTS;
}
@@ -348,12 +361,14 @@ void OplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
if (_dataReplicatorExternalState->shouldStopFetching(_fetcher.getSource(), metadata)) {
_onShutdown(Status(ErrorCodes::InvalidSyncSource,
- str::stream()
- << "sync source " << _fetcher.getSource().toString()
- << " (last optime: " << metadata.getLastOpVisible().toString()
- << "; sync source index: " << metadata.getSyncSourceIndex()
- << "; primary index: " << metadata.getPrimaryIndex()
- << ") is no longer valid"),
+ str::stream() << "sync source " << _fetcher.getSource().toString()
+ << " (last optime: "
+ << metadata.getLastOpVisible().toString()
+ << "; sync source index: "
+ << metadata.getSyncSourceIndex()
+ << "; primary index: "
+ << metadata.getPrimaryIndex()
+ << ") is no longer valid"),
opTimeWithHash);
return;
}
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp
index 49875369842..6ec6ab36bc4 100644
--- a/src/mongo/db/repl/oplog_fetcher_test.cpp
+++ b/src/mongo/db/repl/oplog_fetcher_test.cpp
@@ -316,7 +316,8 @@ TEST_F(
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getCommandObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
cmdObj["filter"].Obj());
@@ -335,7 +336,8 @@ TEST_F(
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getCommandObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
cmdObj["filter"].Obj());
@@ -351,7 +353,8 @@ TEST_F(OplogFetcherTest, MetadataObjectContainsReplSetMetadataFieldUnderProtocol
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getMetadataObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getMetadataObject_forTest();
ASSERT_EQUALS(1, metadataObj.nFields());
ASSERT_EQUALS(1, metadataObj[rpc::kReplSetMetadataFieldName].numberInt());
}
@@ -364,7 +367,8 @@ TEST_F(OplogFetcherTest, MetadataObjectIsEmptyUnderProtocolVersion0) {
_createConfig(false),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getMetadataObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getMetadataObject_forTest();
ASSERT_EQUALS(BSONObj(), metadataObj);
}
@@ -377,7 +381,8 @@ TEST_F(OplogFetcherTest, RemoteCommandTimeoutShouldEqualElectionTimeout) {
config,
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getRemoteCommandTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getRemoteCommandTimeout_forTest();
ASSERT_EQUALS(config.getElectionTimeoutPeriod(), timeout);
}
@@ -390,7 +395,8 @@ TEST_F(OplogFetcherTest, AwaitDataTimeoutShouldEqualHalfElectionTimeoutUnderProt
config,
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getAwaitDataTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getAwaitDataTimeout_forTest();
ASSERT_EQUALS(config.getElectionTimeoutPeriod() / 2, timeout);
}
@@ -402,7 +408,8 @@ TEST_F(OplogFetcherTest, AwaitDataTimeoutShouldBeAConstantUnderProtocolVersion0)
_createConfig(false),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getAwaitDataTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getAwaitDataTimeout_forTest();
ASSERT_EQUALS(OplogFetcher::kDefaultProtocolZeroAwaitDataTimeout, timeout);
}
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index 43a74b15d54..88aa50436ee 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -91,8 +91,8 @@ OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* txn, const std::strin
std::string OplogInterfaceLocal::toString() const {
return str::stream() << "LocalOplogInterface: "
- "operation context: " << _txn->getOpID()
- << "; collection: " << _collectionName;
+ "operation context: "
+ << _txn->getOpID() << "; collection: " << _collectionName;
}
std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const {
diff --git a/src/mongo/db/repl/oplog_interface_mock.h b/src/mongo/db/repl/oplog_interface_mock.h
index 524ab3c8d2f..7c1b32c506f 100644
--- a/src/mongo/db/repl/oplog_interface_mock.h
+++ b/src/mongo/db/repl/oplog_interface_mock.h
@@ -28,8 +28,8 @@
#pragma once
-#include <initializer_list>
#include "mongo/db/repl/oplog_interface.h"
+#include <initializer_list>
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp
index ef1d82dff7d..5f5f5800e24 100644
--- a/src/mongo/db/repl/optime_extract_test.cpp
+++ b/src/mongo/db/repl/optime_extract_test.cpp
@@ -49,7 +49,8 @@ TEST(ExtractBSON, ExtractOpTimeField) {
// Missing timestamp field.
obj = BSON("a" << BSON("ts"
<< "notATimestamp"
- << "t" << 2));
+ << "t"
+ << 2));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, bsonExtractOpTimeField(obj, "a", &opTime));
// Wrong typed timestamp field.
obj = BSON("a" << BSON("t" << 2));
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 097bdc78655..79c9d7b65b4 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -125,7 +125,8 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
} else {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized option in " << kReadConcernFieldName
- << ": " << fieldName);
+ << ": "
+ << fieldName);
}
}
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index 57364f07d14..75843f5a945 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -38,12 +38,13 @@ namespace {
TEST(ReadAfterParse, ReadAfterOnly) {
ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30)
- << OpTime::kTermFieldName << 2)))));
+ ASSERT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)))));
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
@@ -54,7 +55,8 @@ TEST(ReadAfterParse, ReadCommitLevelOnly) {
ReadConcernArgs readAfterOpTime;
ASSERT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT_TRUE(readAfterOpTime.getOpTime().isNull());
@@ -63,13 +65,15 @@ TEST(ReadAfterParse, ReadCommitLevelOnly) {
TEST(ReadAfterParse, ReadCommittedFullSpecification) {
ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(
- BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30)
- << OpTime::kTermFieldName << 2)
- << ReadConcernArgs::kLevelFieldName << "majority"))));
+ ASSERT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName
+ << "majority"))));
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
@@ -87,24 +91,26 @@ TEST(ReadAfterParse, Empty) {
TEST(ReadAfterParse, BadRootType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << "x")));
}
TEST(ReadAfterParse, BadOpTimeType) {
ReadConcernArgs readAfterOpTime;
ASSERT_NOT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
ReadConcernArgs readAfterOpTime;
ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSONObj())));
}
@@ -112,47 +118,49 @@ TEST(ReadAfterParse, NoOpTimeTS) {
ReadConcernArgs readAfterOpTime;
ASSERT_NOT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << 2)))));
}
TEST(ReadAfterParse, NoOpTimeTerm) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTermFieldName << 2)))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << BSON("x" << 1) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(
+ BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << BSON("x" << 1) << OpTime::kTermFieldName
+ << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTermType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(1, 0) << OpTime::kTermFieldName
- << "y")))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(1, 0) << OpTime::kTermFieldName
+ << "y")))));
}
TEST(ReadAfterParse, BadLevelType) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
@@ -160,7 +168,8 @@ TEST(ReadAfterParse, BadLevelValue) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::FailedToParse,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "seven is not a real level"))));
}
@@ -169,7 +178,8 @@ TEST(ReadAfterParse, BadOption) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::InvalidOptions,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON("asdf" << 1))));
}
@@ -188,10 +198,10 @@ TEST(ReadAfterSerialize, ReadAfterOnly) {
ReadConcernArgs readAfterOpTime(OpTime(Timestamp(20, 30), 2), boost::none);
readAfterOpTime.appendInfo(&builder);
- BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName << BSON(
- OpTime::kTimestampFieldName
- << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(
+ ReadConcernArgs::kReadConcernFieldName << BSON(
+ ReadConcernArgs::kAfterOpTimeFieldName << BSON(
+ OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_EQ(expectedObj, builder.done());
}
@@ -213,11 +223,13 @@ TEST(ReadAfterSerialize, FullSpecification) {
ReadConcernLevel::kMajorityReadConcern);
readAfterOpTime.appendInfo(&builder);
- BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName << BSON(
- ReadConcernArgs::kLevelFieldName
- << "majority" << ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(
+ ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "majority"
+ << ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2))));
ASSERT_EQ(expectedObj, builder.done());
}
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index f7388355c98..032ef5e3b1d 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -261,18 +261,18 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
if (memberStateElement.eoo()) {
_stateSet = false;
} else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream()
- << "Expected \"" << kMemberStateFieldName
+ return Status(
+ ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kMemberStateFieldName
<< "\" field in response to replSetHeartbeat "
"command to have type NumberInt or NumberLong, but found type "
<< typeName(memberStateElement.type()));
} else {
long long stateInt = memberStateElement.numberLong();
if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Value for \"" << kMemberStateFieldName
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Value for \"" << kMemberStateFieldName
<< "\" in response to replSetHeartbeat is "
"out of range; legal values are non-negative and no more than "
<< MemberState::RS_MAX);
@@ -312,7 +312,8 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"" << kHbMessageFieldName
<< "\" field in response to replSetHeartbeat to have "
- "type String, but found " << typeName(hbMsgElement.type()));
+ "type String, but found "
+ << typeName(hbMsgElement.type()));
} else {
_hbmsg = hbMsgElement.String();
}
@@ -339,7 +340,8 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"" << kConfigFieldName
<< "\" in response to replSetHeartbeat to have type "
- "Object, but found " << typeName(rsConfigElement.type()));
+ "Object, but found "
+ << typeName(rsConfigElement.type()));
}
_configSet = true;
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index 87634a155a5..80548c1aacf 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -713,7 +713,8 @@ TEST(ReplSetHeartbeatResponse, InitializeHeartbeatMeessageWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "hbmsg" << 4);
+ "hbmsg"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -726,7 +727,8 @@ TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "syncingTo" << 4);
+ "syncingTo"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -739,7 +741,8 @@ TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "config" << 4);
+ "config"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -752,7 +755,8 @@ TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "config" << BSON("illegalFieldName" << 2));
+ "config"
+ << BSON("illegalFieldName" << 2));
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
diff --git a/src/mongo/db/repl/repl_set_html_summary.cpp b/src/mongo/db/repl/repl_set_html_summary.cpp
index 218dff908fd..14c2ff81b7d 100644
--- a/src/mongo/db/repl/repl_set_html_summary.cpp
+++ b/src/mongo/db/repl/repl_set_html_summary.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/repl/repl_set_html_summary.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/util/mongoutils/html.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 36705f1ce74..b7c6918d75d 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -32,8 +32,8 @@
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/util/concurrency/mutex.h"
#include "mongo/db/repl/bgsync.h"
+#include "mongo/util/concurrency/mutex.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/replica_set_config.cpp b/src/mongo/db/repl/replica_set_config.cpp
index 8f619f6a5d9..b3d0d8f4fc8 100644
--- a/src/mongo/db/repl/replica_set_config.cpp
+++ b/src/mongo/db/repl/replica_set_config.cpp
@@ -128,7 +128,8 @@ Status ReplicaSetConfig::_initialize(const BSONObj& cfg,
if (memberElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of " << kMembersFieldName << "."
- << memberElement.fieldName() << " to be Object, but found "
+ << memberElement.fieldName()
+ << " to be Object, but found "
<< typeName(memberElement.type()));
}
_members.resize(_members.size() + 1);
@@ -200,7 +201,8 @@ Status ReplicaSetConfig::_initialize(const BSONObj& cfg,
str::stream() << "replica set configuration cannot contain '"
<< kReplicaSetIdFieldName
<< "' "
- "field when called from replSetInitiate: " << cfg);
+ "field when called from replSetInitiate: "
+ << cfg);
}
_replicaSetId = OID::gen();
} else if (!_replicaSetId.isSet()) {
@@ -312,8 +314,10 @@ Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
if (modeElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.'
- << modeElement.fieldName() << " to be an Object, not "
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << " to be an Object, not "
<< typeName(modeElement.type()));
}
ReplicaSetTagPattern pattern = _tagConfig.makePattern();
@@ -321,20 +325,26 @@ Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
const BSONElement constraintElement = constraintIter.next();
if (!constraintElement.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream()
- << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
- << '.' << constraintElement.fieldName() << " to be a number, not "
- << typeName(constraintElement.type()));
+ str::stream() << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << '.'
+ << constraintElement.fieldName()
+ << " to be a number, not "
+ << typeName(constraintElement.type()));
}
const int minCount = constraintElement.numberInt();
if (minCount <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Value of " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.'
- << modeElement.fieldName() << '.'
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << '.'
<< constraintElement.fieldName()
- << " must be positive, but found " << minCount);
+ << " must be positive, but found "
+ << minCount);
}
status = _tagConfig.addTagCountConstraintToPattern(
&pattern, constraintElement.fieldNameStringData(), minCount);
@@ -370,7 +380,8 @@ Status ReplicaSetConfig::validate() const {
if (_replSetName.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Replica set configuration must have non-empty "
- << kIdFieldName << " field");
+ << kIdFieldName
+ << " field");
}
if (_heartbeatInterval < Milliseconds(0)) {
return Status(ErrorCodes::BadValue,
@@ -413,22 +424,41 @@ Status ReplicaSetConfig::validate() const {
const MemberConfig& memberJ = _members[j];
if (memberI.getId() == memberJ.getId()) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Found two member configurations with same "
- << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
- << "." << i << "." << MemberConfig::kIdFieldName
- << " == " << kMembersFieldName << "." << j << "."
- << MemberConfig::kIdFieldName << " == " << memberI.getId());
+ str::stream() << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName
+ << " field, "
+ << kMembersFieldName
+ << "."
+ << i
+ << "."
+ << MemberConfig::kIdFieldName
+ << " == "
+ << kMembersFieldName
+ << "."
+ << j
+ << "."
+ << MemberConfig::kIdFieldName
+ << " == "
+ << memberI.getId());
}
if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Found two member configurations with same "
- << MemberConfig::kHostFieldName << " field, "
- << kMembersFieldName << "." << i << "."
<< MemberConfig::kHostFieldName
- << " == " << kMembersFieldName << "." << j << "."
+ << " field, "
+ << kMembersFieldName
+ << "."
+ << i
+ << "."
+ << MemberConfig::kHostFieldName
+ << " == "
+ << kMembersFieldName
+ << "."
+ << j
+ << "."
<< MemberConfig::kHostFieldName
- << " == " << memberI.getHostAndPort().toString());
+ << " == "
+ << memberI.getHostAndPort().toString());
}
}
}
@@ -438,7 +468,9 @@ Status ReplicaSetConfig::validate() const {
ErrorCodes::BadValue,
str::stream()
<< "Either all host names in a replica set configuration must be localhost "
- "references, or none must be; found " << localhostCount << " out of "
+ "references, or none must be; found "
+ << localhostCount
+ << " out of "
<< _members.size());
}
@@ -474,7 +506,8 @@ Status ReplicaSetConfig::validate() const {
if (_protocolVersion != 0 && _protocolVersion != 1) {
return Status(ErrorCodes::BadValue,
str::stream() << kProtocolVersionFieldName << " field value of "
- << _protocolVersion << " is not 1 or 0");
+ << _protocolVersion
+ << " is not 1 or 0");
}
if (_configServer) {
@@ -546,7 +579,8 @@ Status ReplicaSetConfig::checkIfWriteConcernCanBeSatisfied(
// write concern mode.
return Status(ErrorCodes::CannotSatisfyWriteConcern,
str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode << "\"");
+ << writeConcern.wMode
+ << "\"");
} else {
int nodesRemaining = writeConcern.wNumNodes;
for (size_t j = 0; j < _members.size(); ++j) {
diff --git a/src/mongo/db/repl/replica_set_config_checks.cpp b/src/mongo/db/repl/replica_set_config_checks.cpp
index 6539bd08e3f..a45985d3cb4 100644
--- a/src/mongo/db/repl/replica_set_config_checks.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks.cpp
@@ -60,8 +60,10 @@ StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalSt
if (meConfigs.empty()) {
return StatusWith<int>(ErrorCodes::NodeNotFound,
str::stream() << "No host described in new configuration "
- << newConfig.getConfigVersion() << " for replica set "
- << newConfig.getReplSetName() << " maps to this node");
+ << newConfig.getConfigVersion()
+ << " for replica set "
+ << newConfig.getReplSetName()
+ << " maps to this node");
}
if (meConfigs.size() > 1) {
str::stream message;
@@ -90,9 +92,11 @@ Status checkElectable(const ReplicaSetConfig& newConfig, int configIndex) {
if (!myConfig.isElectable()) {
return Status(ErrorCodes::NodeNotElectable,
str::stream() << "This node, " << myConfig.getHostAndPort().toString()
- << ", with _id " << myConfig.getId()
+ << ", with _id "
+ << myConfig.getId()
<< " is not electable under the new configuration version "
- << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getConfigVersion()
+ << " for replica set "
<< newConfig.getReplSetName());
}
return Status::OK();
@@ -138,22 +142,28 @@ Status validateOldAndNewConfigsCompatible(const ReplicaSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream()
<< "New replica set configuration version must be greater than old, but "
- << newConfig.getConfigVersion() << " is not greater than "
- << oldConfig.getConfigVersion() << " for replica set "
+ << newConfig.getConfigVersion()
+ << " is not greater than "
+ << oldConfig.getConfigVersion()
+ << " for replica set "
<< newConfig.getReplSetName());
}
if (oldConfig.getReplSetName() != newConfig.getReplSetName()) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set name; "
- "old was " << oldConfig.getReplSetName() << ", and new is "
+ "old was "
+ << oldConfig.getReplSetName()
+ << ", and new is "
<< newConfig.getReplSetName());
}
if (oldConfig.getReplicaSetId() != newConfig.getReplicaSetId()) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set ID; "
- "old was " << oldConfig.getReplicaSetId() << ", and new is "
+ "old was "
+ << oldConfig.getReplicaSetId()
+ << ", and new is "
<< newConfig.getReplicaSetId());
}
@@ -185,14 +195,18 @@ Status validateOldAndNewConfigsCompatible(const ReplicaSetConfig& oldConfig,
}
if (hostsEqual && !idsEqual) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream()
- << "New and old configurations both have members with "
- << MemberConfig::kHostFieldName << " of "
- << mOld->getHostAndPort().toString()
- << " but in the new configuration the "
- << MemberConfig::kIdFieldName << " field is " << mNew->getId()
- << " and in the old configuration it is " << mOld->getId()
- << " for replica set " << newConfig.getReplSetName());
+ str::stream() << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName
+ << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName
+ << " field is "
+ << mNew->getId()
+ << " and in the old configuration it is "
+ << mOld->getId()
+ << " for replica set "
+ << newConfig.getReplSetName());
}
// At this point, the _id and host fields are equal, so we're looking at the old and
// new configurations for the same member node.
diff --git a/src/mongo/db/repl/replica_set_config_checks_test.cpp b/src/mongo/db/repl/replica_set_config_checks_test.cpp
index ab43959d37d..394d6535dbd 100644
--- a/src/mongo/db/repl/replica_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks_test.cpp
@@ -48,7 +48,9 @@ TEST(ValidateConfigForInitiate, VersionMustBe1) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")))));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -59,7 +61,9 @@ TEST(ValidateConfigForInitiate, MustFindSelf) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -85,12 +89,15 @@ TEST(ValidateConfigForInitiate, SelfMustBeElectable) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -108,25 +115,29 @@ TEST(ValidateConfigForReconfig, NewConfigVersionNumberMustBeHigherThanOld) {
ReplicaSetConfig newConfig;
// Two configurations, identical except for version.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -162,25 +173,29 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetName) {
ReplicaSetConfig newConfig;
// Two configurations, compatible except for set name.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs1"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs1"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -201,27 +216,33 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetId) {
ReplicaSetConfig newConfig;
// Two configurations, compatible except for set ID.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3"))
- << "settings" << BSON("replicaSetId" << OID::gen()))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("replicaSetId" << OID::gen()))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3"))
- << "settings" << BSON("replicaSetId" << OID::gen()))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -248,40 +269,51 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotFlipBuildIndexesFlag) {
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes" << false
- << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes" << true
- << "priority" << 0)
+ << "buildIndexes"
+ << true
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(
- oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "buildIndexes" << false
- << "priority" << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -310,37 +342,45 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotFlipArbiterFlag) {
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly" << false)
+ << "arbiterOnly"
+ << false)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(
- oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "arbiterOnly" << false)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly"
+ << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -369,15 +409,17 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
ReplicaSetConfig illegalNewConfigReusingHost;
ReplicaSetConfig illegalNewConfigReusingId;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
//
@@ -387,7 +429,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
ASSERT_OK(
legalNewConfigWithNewHostAndId.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -395,8 +439,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(legalNewConfigWithNewHostAndId.validate());
- ASSERT_OK(validateConfigForReconfig(
- &externalState, oldConfig, legalNewConfigWithNewHostAndId, false).getStatus());
+ ASSERT_OK(
+ validateConfigForReconfig(&externalState, oldConfig, legalNewConfigWithNewHostAndId, false)
+ .getStatus());
//
// Here, the new config is invalid because we've reused host name "h2" with
@@ -404,7 +449,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
//
ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -412,20 +459,24 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(illegalNewConfigReusingHost.validate());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(
- &externalState, oldConfig, illegalNewConfigReusingHost, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, illegalNewConfigReusingHost, false)
+ .getStatus());
// Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(
- &externalState, oldConfig, illegalNewConfigReusingHost, true).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, illegalNewConfigReusingHost, true)
+ .getStatus());
//
// Here, the new config is valid, because all we've changed is the name of
// the host representing _id 2.
//
ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -441,26 +492,30 @@ TEST(ValidateConfigForReconfig, MustFindSelf) {
// Old and new config are same except for version change; this is just testing that we can
// find ourself in the new config.
ReplicaSetConfig oldConfig;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicaSetConfig newConfig;
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock notPresentExternalState;
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -472,9 +527,10 @@ TEST(ValidateConfigForReconfig, MustFindSelf) {
ASSERT_EQUALS(ErrorCodes::NodeNotFound,
validateConfigForReconfig(&notPresentExternalState, oldConfig, newConfig, false)
.getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- validateConfigForReconfig(
- &presentThriceExternalState, oldConfig, newConfig, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::DuplicateKey,
+ validateConfigForReconfig(&presentThriceExternalState, oldConfig, newConfig, false)
+ .getStatus());
ASSERT_EQUALS(1,
unittest::assertGet(validateConfigForReconfig(
&presentOnceExternalState, oldConfig, newConfig, false)));
@@ -494,25 +550,30 @@ TEST(ValidateConfigForReconfig, SelfMustEndElectable) {
// Old and new config are same except for version change and the electability of one node;
// this is just testing that we must be electable in the new config.
ReplicaSetConfig oldConfig;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -533,7 +594,9 @@ TEST(ValidateConfigForInitiate, NewConfigInvalid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -552,14 +615,18 @@ TEST(ValidateConfigForReconfig, NewConfigInvalid) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -583,14 +650,18 @@ TEST(ValidateConfigForStartUp, NewConfigInvalid) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -610,7 +681,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -620,7 +693,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -640,7 +715,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -650,10 +727,13 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 1 << "host"
<< "h3")))));
@@ -670,7 +750,9 @@ TEST(ValidateConfigForHeartbeatReconfig, NewConfigInvalid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -689,7 +771,9 @@ TEST(ValidateConfigForHeartbeatReconfig, NewConfigValid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -706,7 +790,9 @@ TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -716,7 +802,9 @@ TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -735,7 +823,9 @@ TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -745,7 +835,9 @@ TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h3")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replica_set_config_test.cpp b/src/mongo/db/repl/replica_set_config_test.cpp
index 100ca89383f..6e5bb69b40b 100644
--- a/src/mongo/db/repl/replica_set_config_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/server_options.h"
-#include "mongo/util/scopeguard.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
namespace repl {
@@ -62,7 +62,9 @@ TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -88,20 +90,32 @@ TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON(
- "_id"
- << "rs0"
- << "version" << 1234 << "members" << BSON_ARRAY(BSON("_id" << 234 << "host"
- << "localhost:12345"
- << "tags" << BSON("NYC"
- << "NY")))
- << "protocolVersion" << 1 << "settings"
- << BSON("getLastErrorDefaults" << BSON("w"
- << "majority") << "getLastErrorModes"
- << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed"
- << false << "heartbeatIntervalMillis" << 5000
- << "heartbeatTimeoutSecs" << 120 << "electionTimeoutMillis"
- << 10))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1234
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1))
+ << "chainingAllowed"
+ << false
+ << "heartbeatIntervalMillis"
+ << 5000
+ << "heartbeatTimeoutSecs"
+ << 120
+ << "electionTimeoutMillis"
+ << 10))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("rs0", config.getReplSetName());
ASSERT_EQUALS(1234, config.getConfigVersion());
@@ -123,44 +137,57 @@ TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
TEST(ReplicaSetConfig, GetConnectionStringFiltersHiddenNodes) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:11111")
- << BSON("_id" << 1 << "host"
- << "localhost:22222"
- << "arbiterOnly" << true)
- << BSON("_id" << 2 << "host"
- << "localhost:33333"
- << "hidden" << true << "priority" << 0)
- << BSON("_id" << 3 << "host"
- << "localhost:44444")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:11111")
+ << BSON("_id" << 1 << "host"
+ << "localhost:22222"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 2 << "host"
+ << "localhost:33333"
+ << "hidden"
+ << true
+ << "priority"
+ << 0)
+ << BSON("_id" << 3 << "host"
+ << "localhost:44444")))));
ASSERT_OK(config.validate());
- ASSERT_EQUALS(
- ConnectionString::forReplicaSet(
- "rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}}).toString(),
- config.getConnectionString().toString());
+ ASSERT_EQUALS(ConnectionString::forReplicaSet(
+ "rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}})
+ .toString(),
+ config.getConnectionString().toString());
}
TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 5 << "host"
- << "h5:1"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
@@ -168,24 +195,35 @@ TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbiters) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
- << BSON("host"
- << "node4:12345"
- << "_id" << 3 << "arbiterOnly" << true)
- << BSON("host"
- << "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3
+ << "arbiterOnly"
+ << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
@@ -194,43 +232,64 @@ TEST(ReplicaSetConfig, MajorityCalculationEvenNumberOfMembers) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3)))));
+ << "_id"
+ << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1 << "votes" << 0 << "priority" << 0)
- << BSON("host"
- << "node3:12345"
- << "_id" << 2 << "votes" << 0 << "priority" << 0)
- << BSON("host"
- << "node4:12345"
- << "_id" << 3) << BSON("host"
- << "node5:12345"
- << "_id" << 4)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
}
@@ -253,7 +312,9 @@ TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
// Empty repl set name parses, but does not validate.
ASSERT_OK(config.initialize(BSON("_id"
<< ""
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
@@ -263,40 +324,44 @@ TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingVersionField) {
ReplicaSetConfig config;
// Config version field must be present.
- ASSERT_EQUALS(
- ErrorCodes::NoSuchKey,
- config.initialize(BSON("_id"
- << "rs0"
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
- ASSERT_EQUALS(
- ErrorCodes::TypeMismatch,
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << "1"
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << "1"
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1.0 << "members"
+ << "version"
+ << 1.0
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 0.0 << "members"
+ << "version"
+ << 0.0
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << static_cast<long long>(std::numeric_limits<int>::max()) + 1
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << static_cast<long long>(std::numeric_limits<int>::max()) + 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -305,13 +370,17 @@ TEST(ReplicaSetConfig, ParseFailsWithBadMembers) {
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< "localhost:23456"))));
ASSERT_NOT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "localhost:12345")))));
}
@@ -320,7 +389,9 @@ TEST(ReplicaSetConfig, ParseFailsWithLocalNonLocalHostMix) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost")
<< BSON("_id" << 1 << "host"
@@ -332,10 +403,13 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
ReplicaSetConfig config;
const BSONObj configBsonNoElectableNodes = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -344,38 +418,51 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
ASSERT_OK(config.initialize(configBsonNoElectableNodes));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesOneArbiter =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly" << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "priority" << 0)));
+ const BSONObj configBsonNoElectableNodesOneArbiter = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(
+ BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly"
+ << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority"
+ << 0)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesTwoArbiters =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly" << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "arbiterOnly" << 1)));
+ const BSONObj configBsonNoElectableNodesTwoArbiters = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(
+ BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly"
+ << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly"
+ << 1)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneElectableNode = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -386,30 +473,42 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
TEST(ReplicaSetConfig, ParseFailsWithTooFewVoters) {
ReplicaSetConfig config;
- const BSONObj configBsonNoVoters =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "votes" << 0 << "priority" << 0)));
+ const BSONObj configBsonNoVoters = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)));
ASSERT_OK(config.initialize(configBsonNoVoters));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneVoter = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "votes" << 0 << "priority"
+ << "votes"
+ << 0
+ << "priority"
<< 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
- << "votes" << 1)));
+ << "votes"
+ << 1)));
ASSERT_OK(config.initialize(configBsonOneVoter));
ASSERT_OK(config.validate());
}
@@ -426,7 +525,9 @@ TEST(ReplicaSetConfig, ParseFailsWithDuplicateHost) {
ReplicaSetConfig config;
const BSONObj configBson = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1")
<< BSON("_id" << 1 << "host"
@@ -477,7 +578,9 @@ TEST(ReplicaSetConfig, ParseFailsWithUnexpectedField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "unexpectedfield"
+ << "version"
+ << 1
+ << "unexpectedfield"
<< "value"));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -486,7 +589,9 @@ TEST(ReplicaSetConfig, ParseFailsWithNonArrayMembersField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< "value"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -495,11 +600,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatIntervalMillisField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatIntervalMillis"
- << "no")));
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_FALSE(config.isInitialized());
@@ -512,11 +620,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericElectionTimeoutMillisField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("electionTimeoutMillis"
- << "no")));
+ << "settings"
+ << BSON("electionTimeoutMillis"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -524,11 +635,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatTimeoutSecs"
- << "no")));
+ << "settings"
+ << BSON("heartbeatTimeoutSecs"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -536,48 +650,57 @@ TEST(ReplicaSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("chainingAllowed"
- << "no")));
+ << "settings"
+ << BSON("chainingAllowed"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonBoolConfigServerField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "configsvr"
- << "no"));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "configsvr"
+ << "no"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonObjectSettingsField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << "none"));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << "none"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("getLastErrorDefaults" << BSON("fsync"
- << "seven"))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("getLastErrorDefaults" << BSON("fsync"
+ << "seven"))));
ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
}
@@ -585,11 +708,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("getLastErrorDefaults"
- << "no")));
+ << "settings"
+ << BSON("getLastErrorDefaults"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -597,41 +723,50 @@ TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("getLastErrorModes"
- << "no")));
+ << "settings"
+ << BSON("getLastErrorModes"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
- << BSON("getLastErrorModes"
- << BSON("one" << BSON("tag" << 1) << "one"
- << BSON("tag" << 1)))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("one" << BSON("tag" << 1) << "one"
+ << BSON("tag" << 1)))));
ASSERT_EQUALS(ErrorCodes::DuplicateKey, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
- << BSON("getLastErrorModes" << BSON("one" << 1))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << 1))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -640,11 +775,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue)
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag"
<< "no")))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
@@ -655,11 +794,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag" << -1)))));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -669,11 +812,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag)
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag2" << 1)))));
ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
}
@@ -682,7 +829,11 @@ TEST(ReplicaSetConfig, ValidateFailsWithBadProtocolVersion) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 3 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 3
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -697,7 +848,9 @@ TEST(ReplicaSetConfig, ValidateFailsWithDuplicateMemberId) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 0 << "host"
@@ -712,10 +865,13 @@ TEST(ReplicaSetConfig, ValidateFailsWithInvalidMember) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "hidden" << true))));
+ << "hidden"
+ << true))));
ASSERT_OK(status);
status = config.validate();
@@ -726,18 +882,24 @@ TEST(ReplicaSetConfig, ChainingAllowedField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("chainingAllowed" << true))));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("chainingAllowed" << false))));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.isChainingAllowed());
@@ -745,18 +907,27 @@ TEST(ReplicaSetConfig, ChainingAllowedField) {
TEST(ReplicaSetConfig, ConfigServerField) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config.isConfigServer());
ReplicaSetConfig config2;
ASSERT_OK(config2.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "configsvr" << false << "members"
+ << "version"
+ << 1
+ << "configsvr"
+ << false
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
@@ -779,18 +950,25 @@ TEST(ReplicaSetConfig, ConfigServerFieldDefaults) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config.isConfigServer());
ReplicaSetConfig config2;
- ASSERT_OK(
- config2.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config2.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
@@ -799,18 +977,25 @@ TEST(ReplicaSetConfig, ConfigServerFieldDefaults) {
ReplicaSetConfig config3;
ASSERT_OK(config3.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config3.isConfigServer());
ReplicaSetConfig config4;
- ASSERT_OK(
- config4.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config4.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config4.isConfigServer());
}
@@ -818,18 +1003,24 @@ TEST(ReplicaSetConfig, HeartbeatIntervalField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatIntervalMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(5), config.getHeartbeatInterval());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatIntervalMillis" << -5000))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -838,19 +1029,25 @@ TEST(ReplicaSetConfig, ElectionTimeoutField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("electionTimeoutMillis" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(20), config.getElectionTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("electionTimeoutMillis" << -20)));
+ << "settings"
+ << BSON("electionTimeoutMillis" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "election timeout must be greater than 0");
}
@@ -859,19 +1056,25 @@ TEST(ReplicaSetConfig, HeartbeatTimeoutField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatTimeoutSecs" << -20)));
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "heartbeat timeout must be greater than 0");
}
@@ -880,9 +1083,12 @@ TEST(ReplicaSetConfig, GleDefaultField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w"
<< "majority")))));
ASSERT_OK(config.validate());
@@ -890,32 +1096,43 @@ TEST(ReplicaSetConfig, GleDefaultField) {
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w"
<< "frim")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w" << 0)))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("a"
- << "v")))
- << "settings" << BSON("getLastErrorDefaults"
- << BSON("w"
- << "frim") << "getLastErrorModes"
- << BSON("frim" << BSON("a" << 1))))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("a"
+ << "v")))
+ << "settings"
+ << BSON("getLastErrorDefaults" << BSON("w"
+ << "frim")
+ << "getLastErrorModes"
+ << BSON("frim" << BSON("a" << 1))))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("frim", config.getDefaultWriteConcern().wMode);
ASSERT_OK(config.findCustomWriteMode("frim").getStatus());
@@ -992,14 +1209,17 @@ bool operator==(const ReplicaSetConfig& a, const ReplicaSetConfig& b) {
TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("heartbeatIntervalMillis"
- << 5000 << "heartbeatTimeoutSecs" << 20
- << "replicaSetId" << OID::gen()))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
+ << OID::gen()))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_TRUE(configA == configB);
}
@@ -1007,35 +1227,66 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "asdf"
- << "version" << 9 << "writeConcernMajorityJournalDefault" << true << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly" << true << "votes" << 1)
- << BSON("_id" << 3 << "host"
- << "localhost:3828"
- << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
- << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
- << 0 << "tags" << BSON("coast"
- << "east"
- << "ssd"
- << "true"))
- << BSON("_id" << 2 << "host"
- << "foo.com:3828"
- << "votes" << 0 << "priority" << 0 << "tags"
- << BSON("coast"
- << "west"
- << "hdd"
- << "true"))) << "protocolVersion" << 0 << "settings"
-
- << BSON("heartbeatIntervalMillis"
- << 5000 << "heartbeatTimeoutSecs" << 20 << "electionTimeoutMillis" << 4
- << "chainingAllowd" << true << "getLastErrorDefaults" << BSON("w"
- << "majority")
- << "getLastErrorModes" << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1) << "coasts"
- << BSON("coast" << 2))))));
+ ASSERT_OK(configA.initialize(
+ BSON("_id"
+ << "asdf"
+ << "version"
+ << 9
+ << "writeConcernMajorityJournalDefault"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly"
+ << true
+ << "votes"
+ << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly"
+ << false
+ << "hidden"
+ << true
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 17
+ << "votes"
+ << 0
+ << "tags"
+ << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "votes"
+ << 0
+ << "priority"
+ << 0
+ << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true")))
+ << "protocolVersion"
+ << 0
+ << "settings"
+
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis"
+ << 4
+ << "chainingAllowd"
+ << true
+ << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
+ << "coasts"
+ << BSON("coast" << 2))))));
BSONObj configObjA = configA.toBSON();
// Ensure a protocolVersion does not show up if it is 0 to maintain cross version compatibility.
ASSERT_FALSE(configObjA.hasField("protocolVersion"));
@@ -1046,22 +1297,39 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id"
- << ""
- << "version" << -3 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly" << true << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 0 << "host"
- << "localhost:3828"
- << "arbiterOnly" << false << "buildIndexes" << false
- << "priority" << 2)
- << BSON("_id" << 2 << "host"
- << "localhost:3828"
- << "votes" << 0 << "priority" << 0)) << "settings"
- << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs" << 20
- << "electionTimeoutMillis" << 2))));
+ ASSERT_OK(
+ configA.initialize(BSON("_id"
+ << ""
+ << "version"
+ << -3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly"
+ << true
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 0 << "host"
+ << "localhost:3828"
+ << "arbiterOnly"
+ << false
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 2)
+ << BSON("_id" << 2 << "host"
+ << "localhost:3828"
+ << "votes"
+ << 0
+ << "priority"
+ << 0))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs"
+ << 20
+ << "electionTimeoutMillis"
+ << 2))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_NOT_OK(configA.validate());
ASSERT_NOT_OK(configB.validate());
@@ -1070,46 +1338,57 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
TEST(ReplicaSetConfig, CheckIfWriteConcernCanBeSatisfied) {
ReplicaSetConfig configA;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly" << true))
- << "settings" << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues" << BSON("dc" << 3)
- << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly"
+ << true))
+ << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
+ << "invalidNotEnoughValues"
+ << BSON("dc" << 3)
+ << "invalidNotEnoughNodes"
+ << BSON("rack" << 6))))));
WriteConcernOptions validNumberWC;
validNumberWC.wNumNodes = 5;
@@ -1170,13 +1449,19 @@ TEST(ReplicaSetConfig, CheckConfigServerCantBeProtocolVersion0) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 0 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 0
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly" << true)))));
+ << "arbiterOnly"
+ << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "cannot run in protocolVersion 0");
@@ -1186,13 +1471,19 @@ TEST(ReplicaSetConfig, CheckConfigServerCantHaveArbiters) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly" << true)))));
+ << "arbiterOnly"
+ << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "Arbiters are not allowed");
@@ -1202,14 +1493,21 @@ TEST(ReplicaSetConfig, CheckConfigServerMustBuildIndexes) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 0
- << "buildIndexes" << false)))));
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "must build indexes");
@@ -1217,16 +1515,23 @@ TEST(ReplicaSetConfig, CheckConfigServerMustBuildIndexes) {
TEST(ReplicaSetConfig, CheckConfigServerCantHaveSlaveDelay) {
ReplicaSetConfig configA;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")
- << BSON("_id" << 1 << "host"
- << "localhost:54321"
- << "priority" << 0
- << "slaveDelay" << 3)))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << BSON("_id" << 1 << "host"
+ << "localhost:54321"
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "cannot have a non-zero slaveDelay");
@@ -1236,15 +1541,21 @@ TEST(ReplicaSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJourn
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
ON_BLOCK_EXIT([&] { serverGlobalParams.clusterRole = ClusterRole::None; });
ReplicaSetConfig configA;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")
- << BSON("_id" << 1 << "host"
- << "localhost:54321"))
- << "writeConcernMajorityJournalDefault" << false)));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << BSON("_id" << 1 << "host"
+ << "localhost:54321"))
+ << "writeConcernMajorityJournalDefault"
+ << false)));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), " must be true in replica set configurations being ");
@@ -1254,22 +1565,30 @@ TEST(ReplicaSetConfig, GetPriorityTakeoverDelay) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)
+ << "priority"
+ << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority" << 4)
+ << "priority"
+ << 4)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority" << 5)) << "settings"
+ << "priority"
+ << 5))
+ << "settings"
<< BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configA.validate());
ASSERT_EQUALS(Milliseconds(5000), configA.getPriorityTakeoverDelay(0));
@@ -1281,22 +1600,30 @@ TEST(ReplicaSetConfig, GetPriorityTakeoverDelay) {
ReplicaSetConfig configB;
ASSERT_OK(configB.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)
+ << "priority"
+ << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority" << 3)) << "settings"
+ << "priority"
+ << 3))
+ << "settings"
<< BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configB.validate());
ASSERT_EQUALS(Milliseconds(5000), configB.getPriorityTakeoverDelay(0));
@@ -1311,7 +1638,9 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1321,10 +1650,13 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// Should be able to set it true in PV0.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault" << true)));
+ << "writeConcernMajorityJournalDefault"
+ << true)));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1332,7 +1664,11 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// PV1, should default to true.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1342,10 +1678,15 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// Should be able to set it false in PV1.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault" << false)));
+ << "writeConcernMajorityJournalDefault"
+ << false)));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1360,10 +1701,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
auto status =
ReplicaSetConfig().initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))
+ << "priority"
+ << 1))
<< "settings"
<< BSON("replicaSetId" << OID::gen())));
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
@@ -1377,10 +1721,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
ASSERT_OK(
configInitiate.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)))));
+ << "priority"
+ << 1)))));
ASSERT_OK(configInitiate.validate());
ASSERT_TRUE(configInitiate.hasReplicaSetId());
OID replicaSetId = configInitiate.getReplicaSetId();
@@ -1389,11 +1736,15 @@ TEST(ReplicaSetConfig, ReplSetId) {
ReplicaSetConfig configLocal;
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))
- << "settings" << BSON("replicaSetId" << replicaSetId))));
+ << "priority"
+ << 1))
+ << "settings"
+ << BSON("replicaSetId" << replicaSetId))));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
ASSERT_EQUALS(replicaSetId, configLocal.getReplicaSetId());
@@ -1402,10 +1753,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
OID defaultReplicaSetId = OID::gen();
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))),
+ << "priority"
+ << 1))),
true,
defaultReplicaSetId));
ASSERT_OK(configLocal.validate());
@@ -1415,10 +1769,14 @@ TEST(ReplicaSetConfig, ReplSetId) {
// 'replicaSetId' field cannot be null.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)) << "settings"
+ << "priority"
+ << 1))
+ << "settings"
<< BSON("replicaSetId" << OID())));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "replicaSetId field value cannot be null");
@@ -1426,10 +1784,14 @@ TEST(ReplicaSetConfig, ReplSetId) {
// 'replicaSetId' field must be an OID.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)) << "settings"
+ << "priority"
+ << 1))
+ << "settings"
<< BSON("replicaSetId" << 12345)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_STRING_CONTAINS(status.reason(),
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index e705fd17c9d..292f0ef9ae4 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -53,18 +53,18 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/repl/rs_sync.h"
#include "mongo/db/repl/rs_initialsync.h"
+#include "mongo/db/repl/rs_sync.h"
#include "mongo/db/repl/snapshot_thread.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
+#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/executor/network_interface.h"
-#include "mongo/s/grid.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/grid.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
@@ -350,12 +350,15 @@ StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(Opera
if (tsElement.eoo()) {
return StatusWith<OpTime>(ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << rsOplogName
- << " missing \"" << tsFieldName << "\" field");
+ << " missing \""
+ << tsFieldName
+ << "\" field");
}
if (tsElement.type() != bsonTimestamp) {
return StatusWith<OpTime>(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of \"" << tsFieldName
- << "\" in most recent " << rsOplogName
+ << "\" in most recent "
+ << rsOplogName
<< " entry to have type Timestamp, but found "
<< typeName(tsElement.type()));
}
@@ -410,8 +413,8 @@ void ReplicationCoordinatorExternalStateImpl::updateShardIdentityConfigString(
if (ShardingState::get(txn)->enabled()) {
const auto configsvrConnStr =
Grid::get(txn)->shardRegistry()->getConfigShard()->getConnString();
- auto status = ShardingState::get(txn)
- ->updateShardIdentityConfigString(txn, configsvrConnStr.toString());
+ auto status = ShardingState::get(txn)->updateShardIdentityConfigString(
+ txn, configsvrConnStr.toString());
if (!status.isOK()) {
warning() << "error encountered while trying to update config connection string to "
<< configsvrConnStr << causedBy(status);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index c33326031b7..8b89de49f60 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -203,18 +203,21 @@ ReplicationCoordinator::Mode getReplicationModeFromSettings(const ReplSettings&
DataReplicatorOptions createDataReplicatorOptions(ReplicationCoordinator* replCoord) {
DataReplicatorOptions options;
- options.rollbackFn =
- [](OperationContext*, const OpTime&, const HostAndPort&) -> Status { return Status::OK(); };
+ options.rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&) -> Status {
+ return Status::OK();
+ };
options.prepareReplSetUpdatePositionCommandFn =
[replCoord](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> {
- return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
- };
+ -> StatusWith<BSONObj> {
+ return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
+ };
options.getMyLastOptime = [replCoord]() { return replCoord->getMyLastAppliedOpTime(); };
- options.setMyLastOptime =
- [replCoord](const OpTime& opTime) { replCoord->setMyLastAppliedOpTime(opTime); };
- options.setFollowerMode =
- [replCoord](const MemberState& newState) { return replCoord->setFollowerMode(newState); };
+ options.setMyLastOptime = [replCoord](const OpTime& opTime) {
+ replCoord->setMyLastAppliedOpTime(opTime);
+ };
+ options.setFollowerMode = [replCoord](const MemberState& newState) {
+ return replCoord->setFollowerMode(newState);
+ };
options.getSlaveDelay = [replCoord]() { return replCoord->getSlaveDelaySecs(); };
options.syncSourceSelector = replCoord;
options.replBatchLimitBytes = dur::UncommittedBytesLimit;
@@ -367,8 +370,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
if (!status.isOK()) {
error() << "Locally stored replica set configuration does not parse; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this. Got \"" << status
- << "\" while parsing " << cfg.getValue();
+ "for information on how to recover from this. Got \""
+ << status << "\" while parsing " << cfg.getValue();
fassertFailedNoTrace(28545);
}
@@ -417,8 +420,8 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
} else {
error() << "Locally stored replica set configuration is invalid; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config"
- " for information on how to recover from this. Got \"" << myIndex.getStatus()
- << "\" while validating " << localConfig.toBSON();
+ " for information on how to recover from this. Got \""
+ << myIndex.getStatus() << "\" while validating " << localConfig.toBSON();
fassertFailedNoTrace(28544);
}
}
@@ -603,7 +606,8 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
str::stream() << "Timed out waiting for state to become "
- << expectedState.toString() << ". Current state is "
+ << expectedState.toString()
+ << ". Current state is "
<< _memberState.toString());
}
return Status::OK();
@@ -835,7 +839,8 @@ void ReplicationCoordinatorImpl::_updateSlaveInfoDurableOpTime_inlock(SlaveInfo*
if (slaveInfo->lastAppliedOpTime < opTime) {
log() << "Durable progress (" << opTime << ") is ahead of the applied progress ("
<< slaveInfo->lastAppliedOpTime << ". This is likely due to a "
- "rollback. slaveInfo: " << slaveInfo->toString();
+ "rollback. slaveInfo: "
+ << slaveInfo->toString();
return;
}
slaveInfo->lastDurableOpTime = opTime;
@@ -1009,9 +1014,9 @@ void ReplicationCoordinatorImpl::_setMyLastDurableOpTime_inlock(const OpTime& op
// lastAppliedOpTime cannot be behind lastDurableOpTime.
if (mySlaveInfo->lastAppliedOpTime < opTime) {
log() << "My durable progress (" << opTime << ") is ahead of my applied progress ("
- << mySlaveInfo->lastAppliedOpTime
- << ". This is likely due to a "
- "rollback. slaveInfo: " << mySlaveInfo->toString();
+ << mySlaveInfo->lastAppliedOpTime << ". This is likely due to a "
+ "rollback. slaveInfo: "
+ << mySlaveInfo->toString();
return;
}
_updateSlaveInfoDurableOpTime_inlock(mySlaveInfo, opTime);
@@ -2927,21 +2932,24 @@ SyncSourceResolverResponse ReplicationCoordinatorImpl::selectSyncSource(
// Candidate found.
Status queryStatus(ErrorCodes::NotYetInitialized, "not mutated");
BSONObj firstObjFound;
- auto work =
- [&firstObjFound, &queryStatus](const StatusWith<Fetcher::QueryResponse>& queryResult,
- NextAction* nextActiion,
- BSONObjBuilder* bob) {
- queryStatus = queryResult.getStatus();
- if (queryResult.isOK() && !queryResult.getValue().documents.empty()) {
- firstObjFound = queryResult.getValue().documents.front();
- }
- };
+ auto work = [&firstObjFound,
+ &queryStatus](const StatusWith<Fetcher::QueryResponse>& queryResult,
+ NextAction* nextAction,
+ BSONObjBuilder* bob) {
+ queryStatus = queryResult.getStatus();
+ if (queryResult.isOK() && !queryResult.getValue().documents.empty()) {
+ firstObjFound = queryResult.getValue().documents.front();
+ }
+ };
Fetcher candidateProber(&_replExecutor,
candidate,
"local",
BSON("find"
<< "oplog.rs"
- << "limit" << 1 << "sort" << BSON("$natural" << 1)),
+ << "limit"
+ << 1
+ << "sort"
+ << BSON("$natural" << 1)),
work,
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
Milliseconds(30000));
@@ -3414,8 +3422,9 @@ void ReplicationCoordinatorImpl::_resetElectionInfoOnProtocolVersionUpgrade(
}
CallbackHandle ReplicationCoordinatorImpl::_scheduleWork(const CallbackFn& work) {
- auto scheduleFn =
- [this](const CallbackFn& workWrapped) { return _replExecutor.scheduleWork(workWrapped); };
+ auto scheduleFn = [this](const CallbackFn& workWrapped) {
+ return _replExecutor.scheduleWork(workWrapped);
+ };
return _wrapAndScheduleWork(scheduleFn, work);
}
@@ -3440,8 +3449,9 @@ void ReplicationCoordinatorImpl::_scheduleWorkAtAndWaitForCompletion(Date_t when
}
CallbackHandle ReplicationCoordinatorImpl::_scheduleDBWork(const CallbackFn& work) {
- auto scheduleFn =
- [this](const CallbackFn& workWrapped) { return _replExecutor.scheduleDBWork(workWrapped); };
+ auto scheduleFn = [this](const CallbackFn& workWrapped) {
+ return _replExecutor.scheduleDBWork(workWrapped);
+ };
return _wrapAndScheduleWork(scheduleFn, work);
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 53f0ffde4c3..61fda88b59a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -28,9 +28,9 @@
#pragma once
-#include <vector>
#include <memory>
#include <utility>
+#include <vector>
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index a70c8963af8..494b8f7a0d6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -31,10 +31,10 @@
#include "mongo/platform/basic.h"
#include "mongo/base/disallow_copying.h"
-#include "mongo/db/repl/replication_coordinator_impl.h"
-#include "mongo/db/repl/topology_coordinator_impl.h"
#include "mongo/db/repl/elect_cmd_runner.h"
#include "mongo/db/repl/freshness_checker.h"
+#include "mongo/db/repl/replication_coordinator_impl.h"
+#include "mongo/db/repl/topology_coordinator_impl.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index a414ac0aabb..e3779fd3dee 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -77,7 +77,8 @@ void ReplCoordElectTest::simulateFreshEnoughForElectability() {
net->now(),
makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime"
<< Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())
- << "veto" << false)));
+ << "veto"
+ << false)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -95,7 +96,9 @@ TEST_F(ReplCoordElectTest, StartElectionDoesNotStartAnElectionWhenNodeHasNoOplog
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -112,16 +115,22 @@ TEST_F(ReplCoordElectTest, StartElectionDoesNotStartAnElectionWhenNodeHasNoOplog
* vote(s) to win.
*/
TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes" << 0 << "hidden" << true << "priority" << 0))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes"
+ << 0
+ << "hidden"
+ << true
+ << "priority"
+ << 0))),
+ HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
// Fake OpTime from initiate, or a write op.
@@ -166,7 +175,9 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -197,7 +208,9 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordElectTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -221,7 +234,9 @@ TEST_F(ReplCoordElectTest, ElectionFailsWhenOneNodeVotesNay) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -266,7 +281,9 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -298,7 +315,8 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
net->now(),
makeResponseStatus(BSON("ok" << 1 << "vote"
<< "yea"
- << "round" << OID())));
+ << "round"
+ << OID())));
}
net->runReadyNetworkOperations();
}
@@ -311,7 +329,9 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -344,19 +364,22 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345"))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345"))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
@@ -370,7 +393,9 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -428,19 +453,21 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
stopCapturingLogMessages();
// ensure node does not stand for election
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Not standing for election; processing "
- "a configuration change"));
+ countLogLinesContaining("Not standing for election; processing "
+ "a configuration change"));
getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
@@ -501,23 +528,27 @@ TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrim
net->exitNetwork();
ASSERT_EQUALS(1,
countLogLinesContaining(str::stream() << "stepdown of primary("
- << target.toString() << ") succeeded"));
+ << target.toString()
+ << ") succeeded"));
}
TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFreshnessCheckingPhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -542,10 +573,13 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
@@ -560,17 +594,20 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh
TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElectionPhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -582,10 +619,13 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElect
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index beaf238fcef..100f44d4156 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -53,17 +53,24 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes" << 0 << "hidden" << true << "priority" << 0))
- << "protocolVersion" << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes"
+ << 0
+ << "hidden"
+ << true
+ << "priority"
+ << 0))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
@@ -117,11 +124,14 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
TEST_F(ReplCoordTest, StartElectionDoesNotStartAnElectionWhenNodeIsRecovering) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "protocolVersion"
+ << "node2:12345"))
+ << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
@@ -142,9 +152,13 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")) << "protocolVersion" << 1),
+ << "node1:12345"))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0));
@@ -172,13 +186,16 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -202,20 +219,25 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(
- BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345")
- << BSON("_id" << 6 << "host"
- << "node6:12345") << BSON("_id" << 7 << "host"
- << "node7:12345"))
- << "protocolVersion" << 1);
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345")
+ << BSON("_id" << 6 << "host"
+ << "node6:12345")
+ << BSON("_id" << 7 << "host"
+ << "node7:12345"))
+ << "protocolVersion"
+ << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop txn;
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
@@ -239,13 +261,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -278,9 +303,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
- << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << 0 << "voteGranted" << false << "reason"
+ << "don't like him much")));
voteRequests++;
}
net->runReadyNetworkOperations();
@@ -295,13 +320,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -336,7 +364,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted" << false << "reason"
+ << "voteGranted"
+ << false
+ << "reason"
<< "quit living in the past")));
voteRequests++;
}
@@ -353,20 +383,24 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345"))
- << "protocolVersion" << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -381,11 +415,14 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "protocolVersion"
+ << "node2:12345"))
+ << "protocolVersion"
<< 1));
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
@@ -451,9 +488,8 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
stopCapturingLogMessages();
// ensure node does not stand for election
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Not standing for election; processing "
- "a configuration change"));
+ countLogLinesContaining("Not standing for election; processing "
+ "a configuration change"));
getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
@@ -461,13 +497,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -492,9 +531,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
- << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << 1 << "voteGranted" << false << "reason"
+ << "don't like him much")));
}
net->runReadyNetworkOperations();
}
@@ -509,13 +548,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
TEST_F(ReplCoordTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -544,13 +586,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -577,7 +622,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted" << false << "reason"
+ << "voteGranted"
+ << false
+ << "reason"
<< "quit living in the past")));
}
net->runReadyNetworkOperations();
@@ -594,13 +641,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -631,13 +681,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -665,9 +718,10 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true << "reason"
- << "")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << request.cmdObj["term"].Long() << "voteGranted" << true
+ << "reason"
+ << "")));
}
net->runReadyNetworkOperations();
}
@@ -682,14 +736,18 @@ TEST_F(ReplCoordTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurren
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -746,17 +804,20 @@ TEST_F(ReplCoordTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurren
TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -779,10 +840,13 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
@@ -797,17 +861,20 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -819,10 +886,13 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index d9b9297bfb7..4c8945ac064 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -146,11 +146,11 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (replMetadata.isOK() && _rsConfig.isInitialized() && _rsConfig.hasReplicaSetId() &&
replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig.getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
- responseStatus =
- Status(ErrorCodes::InvalidReplicaSetConfig,
- str::stream()
- << "replica set IDs do not match, ours: " << _rsConfig.getReplicaSetId()
- << "; remote node's: " << replMetadata.getValue().getReplicaSetId());
+ responseStatus = Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream() << "replica set IDs do not match, ours: "
+ << _rsConfig.getReplicaSetId()
+ << "; remote node's: "
+ << replMetadata.getValue().getReplicaSetId());
// Ignore metadata.
replMetadata = responseStatus;
}
@@ -435,14 +435,16 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
warning() << "Not persisting new configuration in heartbeat response to disk because "
- "it is invalid: " << myIndex.getStatus();
+ "it is invalid: "
+ << myIndex.getStatus();
} else {
Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON());
lk.lock();
if (!status.isOK()) {
error() << "Ignoring new configuration in heartbeat response because we failed to"
- " write it to stable storage; " << status;
+ " write it to stable storage; "
+ << status;
invariant(_rsConfigState == kConfigHBReconfiguring);
if (_rsConfig.isInitialized()) {
_setConfigState_inlock(kConfigSteady);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 73c9e77c77c..91697d5ad01 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -90,7 +90,9 @@ TEST_F(ReplCoordHBTest, NodeJoinsExistingReplSetWhenReceivingAConfigContainingTh
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -154,7 +156,9 @@ TEST_F(ReplCoordHBTest,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -230,7 +234,9 @@ TEST_F(ReplCoordHBTest,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -243,12 +249,12 @@ TEST_F(ReplCoordHBTest,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(
- noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code" << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code"
+ << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 2233e21cc21..2290786e343 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -86,16 +86,19 @@ ReplSetHeartbeatResponse ReplCoordHBV1Test::receiveHeartbeatFrom(const ReplicaSe
TEST_F(ReplCoordHBV1Test,
NodeJoinsExistingReplSetWhenReceivingAConfigContainingTheNodeViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -150,17 +153,21 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test,
ArbiterJoinsExistingReplSetWhenReceivingAConfigContainingTheArbiterViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1"
- << "arbiterOnly" << true)
- << BSON("_id" << 3 << "host"
- << "h3:1")) << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -217,16 +224,19 @@ TEST_F(ReplCoordHBV1Test,
// Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
// configuration that does not contain it.
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
@@ -296,7 +306,9 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -309,12 +321,12 @@ TEST_F(ReplCoordHBV1Test,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(
- noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code" << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code"
+ << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
@@ -331,10 +343,9 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test, ArbiterRecordsCommittedOpTimeFromHeartbeatMetadata) {
// Tests that an arbiter will update its committed optime from the heartbeat metadata
- assertStartSuccess(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}]}"),
+ assertStartSuccess(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}]}"),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_ARBITER));
@@ -342,14 +353,21 @@ TEST_F(ReplCoordHBV1Test, ArbiterRecordsCommittedOpTimeFromHeartbeatMetadata) {
// its current optime to 'expected'
auto test = [this](OpTime committedOpTime, OpTime expected) {
// process heartbeat metadata directly
- StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName
- << BSON("lastOpCommitted" << BSON("ts" << committedOpTime.getTimestamp() << "t"
- << committedOpTime.getTerm()) << "lastOpVisible"
- << BSON("ts" << committedOpTime.getTimestamp() << "t"
- << committedOpTime.getTerm()) << "configVersion"
- << 1 << "primaryIndex" << 1 << "term"
- << committedOpTime.getTerm() << "syncSourceIndex" << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(
+ BSON(rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << committedOpTime.getTimestamp() << "t"
+ << committedOpTime.getTerm())
+ << "lastOpVisible"
+ << BSON("ts" << committedOpTime.getTimestamp() << "t"
+ << committedOpTime.getTerm())
+ << "configVersion"
+ << 1
+ << "primaryIndex"
+ << 1
+ << "term"
+ << committedOpTime.getTerm()
+ << "syncSourceIndex"
+ << 1)));
ASSERT_OK(metadata.getStatus());
getReplCoord()->processReplSetMetadata(metadata.getValue());
@@ -368,11 +386,15 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
HostAndPort host2("node2:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host" << host2.toString()))
- << "settings" << BSON("replicaSetId" << OID::gen()) << "protocolVersion"
+ << "settings"
+ << BSON("replicaSetId" << OID::gen())
+ << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -438,9 +460,10 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
ASSERT_EQ(MemberState(MemberState::RS_DOWN).toString(),
MemberState(member["state"].numberInt()).toString());
ASSERT_EQ(member["lastHeartbeatMessage"].String(),
- std::string(str::stream()
- << "replica set IDs do not match, ours: " << rsConfig.getReplicaSetId()
- << "; remote node's: " << unexpectedId));
+ std::string(str::stream() << "replica set IDs do not match, ours: "
+ << rsConfig.getReplicaSetId()
+ << "; remote node's: "
+ << unexpectedId));
}
} // namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 5c97e3bc976..e91aa8cb1e0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -35,10 +35,10 @@
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replica_set_config.h"
+#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
-#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/executor/network_interface_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
@@ -72,7 +72,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -96,7 +98,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -112,13 +116,19 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 2 << "invalidlyNamedField" << 3 << "members"
+ << "version"
+ << 2
+ << "invalidlyNamedField"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "arbiterOnly" << true)));
+ << "arbiterOnly"
+ << true)));
// ErrorCodes::BadValue should be propagated from ReplicaSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetReconfig(&txn, args, &result));
@@ -130,7 +140,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -146,7 +158,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "notMySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -162,11 +176,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -179,11 +196,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen()));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -197,7 +217,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -213,7 +235,9 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << -3 << "members"
+ << "version"
+ << -3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -231,7 +255,9 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -247,12 +273,15 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
// Replica set id will be copied from existing configuration.
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "priority" << 3)));
+ << "priority"
+ << 3)));
*status = replCoord->processReplSetReconfig(&txn, args, &garbage);
}
@@ -263,7 +292,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -302,7 +333,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -329,7 +362,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -353,7 +388,9 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -389,7 +426,9 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -408,11 +447,14 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -451,7 +493,9 @@ TEST_F(
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -473,7 +517,9 @@ TEST_F(
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -507,7 +553,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -536,7 +584,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 4 << "members"
+ << "version"
+ << 4
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -570,7 +620,9 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -586,7 +638,9 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 41bbb2a2d4b..0343865aa6c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -37,8 +37,8 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/repl/handshake_args.h"
#include "mongo/db/repl/bson_extract_optime.h"
+#include "mongo/db/repl/handshake_args.h"
#include "mongo/db/repl/is_master_response.h"
#include "mongo/db/repl/old_update_position_args.h"
#include "mongo/db/repl/optime.h"
@@ -112,7 +112,9 @@ void runSingleNodeElection(ServiceContext::UniqueOperationContext txn,
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -123,10 +125,13 @@ TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig)
TEST_F(ReplCoordTest, NodeEntersArbiterStateWhenStartingUpWithValidLocalConfigWhereItIsAnArbiter) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"))),
HostAndPort("node1", 12345));
@@ -138,7 +143,9 @@ TEST_F(ReplCoordTest, NodeEntersRemovedStateWhenStartingUpWithALocalConfigWhichL
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -155,7 +162,9 @@ TEST_F(ReplCoordTest,
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "notMySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -195,7 +204,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -209,7 +220,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result2));
@@ -227,16 +240,20 @@ TEST_F(ReplCoordTest,
// Starting uninitialized, show that we can perform the initiate behavior.
BSONObjBuilder result1;
- auto status = getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node1:12345"
- << "arbiterOnly" << true)
- << BSON("_id" << 1 << "host"
- << "node2:12345"))),
- &result1);
+ auto status =
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 1 << "host"
+ << "node2:12345"))),
+ &result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "is not electable under the new configuration version");
ASSERT_FALSE(getExternalState()->threadsStarted());
@@ -258,7 +275,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -276,7 +295,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node4"))),
&result));
@@ -289,7 +310,9 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 1 << "host"
@@ -376,7 +399,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "wrongSet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -425,7 +450,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -446,7 +473,9 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -467,7 +496,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -519,9 +550,13 @@ TEST_F(
TEST_F(ReplCoordTest, NodeReturnsOkWhenCheckReplEnabledForCommandAfterReceivingAConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
// check status OK and result is empty
@@ -580,15 +615,21 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASecondaryNode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -608,15 +649,21 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWithWZero) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -644,21 +691,28 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWith
TEST_F(ReplCoordTest,
NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWriteDurable) {
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermZero(100, 0));
@@ -717,21 +771,28 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWrite) {
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermZero(100, 0));
@@ -790,15 +851,19 @@ TEST_F(ReplCoordTest,
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0")
<< BSON("_id" << 1 << "host"
- << "node1") << BSON("_id" << 2 << "host"
- << "node2")
+ << "node1")
+ << BSON("_id" << 2 << "host"
+ << "node2")
<< BSON("_id" << 3 << "host"
- << "node3") << BSON("_id" << 4 << "host"
- << "node4"))),
+ << "node3")
+ << BSON("_id" << 4 << "host"
+ << "node4"))),
HostAndPort("node0"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
@@ -828,37 +893,45 @@ TEST_F(
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
<< BSON("_id" << 1 << "host"
<< "node1"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
<< BSON("_id" << 2 << "host"
<< "node2"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
<< BSON("_id" << 3 << "host"
<< "node3"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
<< BSON("_id" << 4 << "host"
<< "node4"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))) << "settings"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("multiDC" << BSON("dc" << 2) << "multiDCAndRack"
<< BSON("dc" << 2 << "rack" << 3)))),
HostAndPort("node0"));
@@ -1020,15 +1093,21 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfie
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1079,15 +1158,21 @@ TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedWhenAWriteConcernTimesOutBefo
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1120,15 +1205,21 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1161,15 +1252,21 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
// if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1203,12 +1300,15 @@ TEST_F(ReplCoordTest,
// Tests that a thread blocked in awaitReplication can be killed by a killOp operation
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1")
<< BSON("_id" << 1 << "host"
- << "node2") << BSON("_id" << 2 << "host"
- << "node3"))),
+ << "node2")
+ << BSON("_id" << 2 << "host"
+ << "node3"))),
HostAndPort("node1"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1254,7 +1354,9 @@ private:
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1277,17 +1379,20 @@ TEST_F(ReplCoordTest, NodeReturnsBadValueWhenUpdateTermIsRunAgainstANonReplNode)
TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppliesAHigherTerm) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("test1", 1234));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -1324,17 +1429,20 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreThanOnce) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("test1", 1234));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -1484,7 +1592,9 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -1759,27 +1869,33 @@ TEST_F(ReplCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
}
TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
OperationContextNoop txn;
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234")
- << BSON("_id" << 3 << "host"
- << "test4:1234"))),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234")
+ << BSON("_id" << 3 << "host"
+ << "test4:1234"))),
+ HostAndPort("test1", 1234));
OpTime optime1({2, 1}, 1);
OpTime optime2({100, 1}, 1);
OpTime optime3({100, 2}, 1);
@@ -1846,16 +1962,18 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInOldUpdatePositionCommand) {
OperationContextNoop txn;
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test1", 1234));
OpTimeWithTermZero optime1(100, 1);
OpTimeWithTermZero optime2(100, 2);
OpTimeWithTermZero optime3(2, 1);
@@ -1899,16 +2017,20 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInOldUpdatePositionCommand
TEST_F(ReplCoordTest,
NodeReturnsOperationFailedWhenSettingMaintenanceModeFalseWhenItHasNotBeenSetTrue) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1923,16 +2045,20 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest,
ReportRollbackWhileInBothRollbackAndMaintenanceModeAndRecoveryAfterFinishingRollback) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1952,16 +2078,20 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintenanceModes) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1983,16 +2113,20 @@ TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintena
TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackState) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2022,16 +2156,20 @@ TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackS
TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2055,16 +2193,20 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2120,7 +2262,9 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -2164,7 +2308,9 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -2230,14 +2376,19 @@ TEST_F(ReplCoordTest, NodeReturnsNoNodesWhenGetOtherNodesInReplSetIsRunBeforeHav
TEST_F(ReplCoordTest, NodeReturnsListOfNodesOtherThanItselfInResponseToGetOtherNodesInReplSet) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h1")
<< BSON("_id" << 1 << "host"
<< "h2")
<< BSON("_id" << 2 << "host"
<< "h3"
- << "priority" << 0 << "hidden" << true))),
+ << "priority"
+ << 0
+ << "hidden"
+ << true))),
HostAndPort("h1"));
std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
@@ -2272,17 +2423,20 @@ TEST_F(ReplCoordTest, IsMaster) {
HostAndPort h3("h3");
HostAndPort h4("h4");
assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
- << BSON("_id" << 1 << "host" << h2.toString())
- << BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
- << BSON("_id" << 3 << "host" << h4.toString() << "priority" << 0
- << "tags" << BSON("key1"
- << "value1"
- << "key2"
- << "value2")))),
+ BSON(
+ "_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
+ << BSON("_id" << 1 << "host" << h2.toString())
+ << BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host" << h4.toString() << "priority" << 0 << "tags"
+ << BSON("key1"
+ << "value1"
+ << "key2"
+ << "value2")))),
h4);
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
@@ -2337,7 +2491,9 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2374,15 +2530,21 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2405,9 +2567,12 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 0
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 0
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 2)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2422,15 +2587,21 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2452,12 +2623,15 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
// receive updatePosition containing ourself, should not process the update for self
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 0
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 0
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
@@ -2468,15 +2642,21 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2496,9 +2676,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 3 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 3
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2515,15 +2698,21 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2542,12 +2731,15 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre
// receive updatePosition with incorrect config version
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 3 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 3
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
long long cfgver;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
@@ -2560,15 +2752,21 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2588,9 +2786,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 9
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 9
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 2)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2605,15 +2806,21 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2632,12 +2839,15 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC
// receive updatePosition with nonexistent member id
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 9
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 9
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
@@ -2649,15 +2859,21 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2678,15 +2894,22 @@ TEST_F(ReplCoordTest,
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
@@ -2702,10 +2925,13 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 1 << "host"
<< "node2:12345")
<< BSON("_id" << 2 << "host"
@@ -2717,15 +2943,21 @@ TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -2790,7 +3022,9 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2804,15 +3038,21 @@ TEST_F(
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 2));
@@ -2860,20 +3100,29 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3) << BSON("host"
- << "node5:12345"
- << "_id" << 4))),
+ << "_id"
+ << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -2934,21 +3183,35 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3 << "votes" << 0 << "priority" << 0)
+ << "_id"
+ << 3
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true))),
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 0), 1);
@@ -2990,21 +3253,35 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3 << "votes" << 0 << "priority" << 0)
+ << "_id"
+ << 3
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true))),
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime zero(Timestamp(0, 0), 0);
@@ -3048,9 +3325,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(10, 0));
@@ -3068,9 +3349,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(10, 0));
@@ -3092,9 +3377,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
auto result = getReplCoord()->waitUntilOpTime(&txn, ReadConcernArgs());
@@ -3107,9 +3396,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -3125,9 +3418,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
@@ -3170,9 +3467,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
@@ -3191,9 +3492,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
const auto txnPtr = makeOperationContext();
@@ -3215,9 +3520,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
@@ -3235,9 +3544,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
OpTime time(Timestamp(100, 0), 1);
@@ -3255,22 +3568,24 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(0, 0), 1));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(0, 0), 1));
OpTime committedOpTime(Timestamp(200, 0), 1);
- auto pseudoLogOp =
- stdx::async(stdx::launch::async,
- [this, &committedOpTime]() {
- // Not guaranteed to be scheduled after waitUntil blocks...
- getReplCoord()->setMyLastAppliedOpTime(committedOpTime);
- getReplCoord()->setMyLastDurableOpTime(committedOpTime);
- getReplCoord()->onSnapshotCreate(committedOpTime, SnapshotName(1));
- });
+ auto pseudoLogOp = stdx::async(stdx::launch::async, [this, &committedOpTime]() {
+ // Not guaranteed to be scheduled after waitUntil blocks...
+ getReplCoord()->setMyLastAppliedOpTime(committedOpTime);
+ getReplCoord()->setMyLastDurableOpTime(committedOpTime);
+ getReplCoord()->onSnapshotCreate(committedOpTime, SnapshotName(1));
+ });
auto result = getReplCoord()->waitUntilOpTime(
&txn,
@@ -3285,9 +3600,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(0, 0), 1));
@@ -3295,14 +3614,12 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
OpTime opTimeToWait(Timestamp(100, 0), 1);
- auto pseudoLogOp =
- stdx::async(stdx::launch::async,
- [this, &opTimeToWait]() {
- // Not guaranteed to be scheduled after waitUntil blocks...
- getReplCoord()->setMyLastAppliedOpTime(opTimeToWait);
- getReplCoord()->setMyLastDurableOpTime(opTimeToWait);
- getReplCoord()->onSnapshotCreate(opTimeToWait, SnapshotName(1));
- });
+ auto pseudoLogOp = stdx::async(stdx::launch::async, [this, &opTimeToWait]() {
+ // Not guaranteed to be scheduled after waitUntil blocks...
+ getReplCoord()->setMyLastAppliedOpTime(opTimeToWait);
+ getReplCoord()->setMyLastDurableOpTime(opTimeToWait);
+ getReplCoord()->onSnapshotCreate(opTimeToWait, SnapshotName(1));
+ });
auto result = getReplCoord()->waitUntilOpTime(
&txn, ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern));
@@ -3316,34 +3633,53 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
// Ensure that we do not process ReplSetMetadata when ConfigVersions do not match.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
// lower configVersion
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 1
- << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2)
+ << "configVersion"
+ << 1
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
// higher configVersion
- StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(
- BSON(rpc::kReplSetMetadataFieldName
- << BSON("lastOpCommitted"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 100
- << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2)
+ << "configVersion"
+ << 100
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
}
@@ -3353,16 +3689,23 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeFromMet
// but not if the OpTime is older than the current LastCommittedOpTime.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))
- << "protocolVersion" << 1),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -3375,20 +3718,34 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeFromMet
// higher OpTime, should change
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 1 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 1)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 1
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getCurrentCommittedSnapshotOpTime());
// lower OpTime, should not change
StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "lastOpVisible"
- << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 1 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "lastOpVisible"
+ << BSON("ts" << Timestamp(9, 0) << "t" << 1)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 1
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getLastCommittedOpTime());
}
@@ -3398,16 +3755,23 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// Ensure that currentPrimaryIndex is never altered by ReplSetMetadata.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))
- << "protocolVersion" << 1),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
OperationContextNoop txn;
@@ -3416,10 +3780,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// higher term, should change
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 3 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3427,10 +3798,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// lower term, should not change
StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 1 << "term" << 2 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(11, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3438,10 +3816,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// same term, should not change
StatusWith<rpc::ReplSetMetadata> metadata3 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 1 << "term" << 3 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata3.getValue());
ASSERT_EQUALS(OpTime(Timestamp(11, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3453,13 +3838,19 @@ TEST_F(ReplCoordTest,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
OperationContextNoop txn;
@@ -3473,9 +3864,15 @@ TEST_F(ReplCoordTest,
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
rpc::kReplSetMetadataFieldName
<< BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion"
- << config.getConfigVersion() << "primaryIndex" << 1 << "term" << 3
- << "syncSourceIndex" << 1)));
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion"
+ << config.getConfigVersion()
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
BSONObjBuilder metadataBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&metadataBuilder));
auto metadataObj = metadataBuilder.obj();
@@ -3506,13 +3903,19 @@ TEST_F(ReplCoordTest,
ScheduleElectionToBeRunInElectionTimeoutFromNowWhenCancelAndRescheduleElectionTimeoutIsRun) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3551,13 +3954,19 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInPV0) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 0 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 0
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -3571,13 +3980,19 @@ TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeou
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_ROLLBACK));
@@ -3592,13 +4007,23 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileUnelectable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0 << "priority" << 0 << "hidden" << true)
+ << "_id"
+ << 0
+ << "priority"
+ << 0
+ << "hidden"
+ << true)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -3613,13 +4038,19 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileRemoved) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3644,10 +4075,15 @@ TEST_F(ReplCoordTest,
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 3 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node2:12345"
- << "_id" << 1))));
+ << "_id"
+ << 1))));
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
@@ -3668,13 +4104,19 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimary) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3714,13 +4156,19 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseWithoutState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3760,7 +4208,9 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3791,7 +4241,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAnOpTimeIsNewerThanOurLat
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3820,7 +4272,9 @@ TEST_F(ReplCoordTest,
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3851,7 +4305,9 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3878,7 +4334,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3902,9 +4360,13 @@ TEST_F(ReplCoordTest,
NodeChangesMyLastOpTimeWhenAndOnlyWhensetMyLastDurableOpTimeReceivesANewerOpTime4DurableSE) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
@@ -3925,13 +4387,18 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1 << "settings"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -4020,20 +4487,32 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4)) << "protocolVersion" << 1 << "settings"
+ << "_id"
+ << 4))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -4043,23 +4522,34 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
// Receive notification that every node is up.
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(
- BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())))));
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
// Become PRIMARY.
@@ -4067,17 +4557,22 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
// Keep two nodes alive.
OldUpdatePositionArgs args1;
- ASSERT_OK(args1.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())))));
+ ASSERT_OK(
+ args1.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args1, 0));
// Confirm that the node remains PRIMARY after the other two nodes are marked DOWN.
@@ -4091,10 +4586,13 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
OldUpdatePositionArgs args2;
ASSERT_OK(
args2.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName
- << 1 << OldUpdatePositionArgs::kOpTimeFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
<< startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0));
@@ -4131,7 +4629,9 @@ TEST_F(ReplCoordTest, WaitForMemberState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -4166,7 +4666,9 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -4205,39 +4707,62 @@ TEST_F(ReplCoordTest, UpdatePositionArgsReturnsNoSuchKeyWhenParsingOldUpdatePosi
OpTime opTime = OpTime(Timestamp(100, 1), 0);
ASSERT_EQUALS(
ErrorCodes::NoSuchKey,
- args2.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(
- BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())))));
-
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName
- << opTime.getTimestamp())))));
+ args2.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())))));
+
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())))));
}
@@ -4248,54 +4773,72 @@ TEST_F(ReplCoordTest, OldUpdatePositionArgsReturnsBadValueWhenParsingUpdatePosit
ASSERT_EQUALS(ErrorCodes::BadValue,
args.initialize(BSON(
UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 2
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 3
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 4
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))))));
ASSERT_OK(args2.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 2
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 3
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 4
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -4309,10 +4852,13 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4329,10 +4875,13 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << true),
+ << "writeConcernMajorityJournalDefault"
+ << true),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4347,10 +4896,13 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfSync
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4372,10 +4924,13 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index fc0f9866f55..e8617423953 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -32,12 +32,12 @@
#include "mongo/base/status.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/write_concern_options.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/read_concern_response.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/sync_source_resolver.h"
#include "mongo/db/storage/snapshot_name.h"
+#include "mongo/db/write_concern_options.h"
#include "mongo/util/assert_util.h"
namespace mongo {
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 7d6d29614d2..319f67c3893 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/is_master_response.h"
-#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
+#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/storage_interface_mock.h"
@@ -238,13 +238,14 @@ void ReplCoordTest::simulateSuccessfulDryRun(
if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
ASSERT_TRUE(request.cmdObj.getBoolField("dryRun"));
onDryRunRequest(request);
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true)));
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term"
+ << request.cmdObj["term"].Long()
+ << "voteGranted"
+ << true)));
voteRequests++;
} else {
error() << "Black holing unexpected request to " << request.target << ": "
@@ -298,13 +299,14 @@ void ReplCoordTest::simulateSuccessfulV1Election() {
hbResp.setConfigVersion(rsConfig.getConfigVersion());
net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true)));
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term"
+ << request.cmdObj["term"].Long()
+ << "voteGranted"
+ << true)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -359,8 +361,8 @@ void ReplCoordTest::simulateSuccessfulElection() {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime" << Date_t()
- << "veto" << false)));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "fresher" << false << "opTime" << Date_t() << "veto" << false)));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetElect") {
net->scheduleResponse(noi,
net->now(),
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 0caabfe808d..0154c5b53e7 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -310,8 +310,8 @@ void ReplicationExecutor::_finishRemoteCommand(const RemoteCommandRequest& reque
return;
}
- LOG(4) << "Received remote response: " << (response.isOK() ? response.getValue().toString()
- : response.getStatus().toString());
+ LOG(4) << "Received remote response: "
+ << (response.isOK() ? response.getValue().toString() : response.getStatus().toString());
callback->_callbackFn =
stdx::bind(remoteCommandFinished, stdx::placeholders::_1, cb, request, response);
diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp
index ceb4ec89beb..7b2de1e1fe1 100644
--- a/src/mongo/db/repl/replication_executor_test.cpp
+++ b/src/mongo/db/repl/replication_executor_test.cpp
@@ -31,14 +31,14 @@
#include <map>
#include "mongo/base/init.h"
-#include "mongo/executor/task_executor_test_common.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
-#include "mongo/stdx/memory.h"
+#include "mongo/executor/task_executor_test_common.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/unittest.h"
@@ -56,11 +56,10 @@ using unittest::assertGet;
const int64_t prngSeed = 1;
MONGO_INITIALIZER(ReplExecutorCommonTests)(InitializerContext*) {
- mongo::executor::addTestsForExecutor("ReplicationExecutorCommon",
- [](std::unique_ptr<executor::NetworkInterfaceMock>* net) {
- return stdx::make_unique<ReplicationExecutor>(
- net->release(), prngSeed);
- });
+ mongo::executor::addTestsForExecutor(
+ "ReplicationExecutorCommon", [](std::unique_ptr<executor::NetworkInterfaceMock>* net) {
+ return stdx::make_unique<ReplicationExecutor>(net->release(), prngSeed);
+ });
return Status::OK();
}
@@ -71,16 +70,19 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
Status status1 = getDetectableErrorStatus();
OperationContext* txn = nullptr;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- barrier.countDownAndWait();
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- barrier.countDownAndWait();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleDBWork([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ barrier.countDownAndWait();
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock(
+ [&](const CallbackData& cbData) { barrier.countDownAndWait(); })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -93,14 +95,20 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
OperationContext* txn = nullptr;
bool collectionIsLocked = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- collectionIsLocked =
- txn ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) : false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }, nss, MODE_X).getStatus());
+ ASSERT_OK(executor
+ .scheduleDBWork(
+ [&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ collectionIsLocked = txn
+ ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X)
+ : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ },
+ nss,
+ MODE_X)
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -113,13 +121,15 @@ TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
OperationContext* txn = nullptr;
bool lockIsW = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- lockIsW = txn ? txn->lockState()->isW() : false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ lockIsW = txn ? txn->lockState()->isW() : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -130,20 +140,24 @@ TEST_F(ReplicationExecutorTest, ShutdownBeforeRunningSecondExclusiveLockOperatio
ReplicationExecutor& executor = getReplExecutor();
using CallbackData = ReplicationExecutor::CallbackArgs;
Status status1 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
// Second db work item is invoked by the main executor thread because the work item is
// moved from the exclusive lock queue to the ready work item queue when the first callback
// cancels the executor.
Status status2 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status2 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status2 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status2.code());
@@ -153,13 +167,12 @@ TEST_F(ReplicationExecutorTest, CancelBeforeRunningFutureWork) {
ReplicationExecutor& executor = getReplExecutor();
using CallbackData = ReplicationExecutor::CallbackArgs;
Status status1 = getDetectableErrorStatus();
- auto cbhWithStatus =
- executor.scheduleWorkAt(executor.now() + Milliseconds(1000),
- [&](const CallbackData& cbData) {
- status1 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- });
+ auto cbhWithStatus = executor.scheduleWorkAt(
+ executor.now() + Milliseconds(1000), [&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ });
ASSERT_OK(cbhWithStatus.getStatus());
ASSERT_EQUALS(1, executor.getDiagnosticBSON().getFieldDotted("queues.sleepers").Int());
diff --git a/src/mongo/db/repl/replication_executor_test_fixture.h b/src/mongo/db/repl/replication_executor_test_fixture.h
index 73e5ae2c504..7b7845302d7 100644
--- a/src/mongo/db/repl/replication_executor_test_fixture.h
+++ b/src/mongo/db/repl/replication_executor_test_fixture.h
@@ -28,9 +28,9 @@
#pragma once
-#include "mongo/stdx/memory.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/executor/task_executor_test_fixture.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/db/repl/replset_commands.cpp b/src/mongo/db/repl/replset_commands.cpp
index 70aaa8b7b6d..c91e28b71a6 100644
--- a/src/mongo/db/repl/replset_commands.cpp
+++ b/src/mongo/db/repl/replset_commands.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/repl/initial_sync.h"
#include "mongo/db/repl/old_update_position_args.h"
#include "mongo/db/repl/oplog.h"
-#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replication_coordinator_external_state_impl.h"
#include "mongo/db/repl/replication_coordinator_global.h"
@@ -451,7 +451,8 @@ public:
txn,
BSON("msg"
<< "Reconfig set"
- << "version" << parsedArgs.newConfigObj["version"]));
+ << "version"
+ << parsedArgs.newConfigObj["version"]));
}
wuow.commit();
@@ -837,7 +838,8 @@ public:
BSONElement cfgverElement = cmdObj["cfgver"];
uassert(28525,
str::stream() << "Expected cfgver argument to replSetFresh command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
+ "numeric type, but found "
+ << typeName(cfgverElement.type()),
cfgverElement.isNumber());
parsedArgs.cfgver = cfgverElement.safeNumberLong();
parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
@@ -871,7 +873,8 @@ private:
BSONElement cfgverElement = cmdObj["cfgver"];
uassert(28526,
str::stream() << "Expected cfgver argument to replSetElect command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
+ "numeric type, but found "
+ << typeName(cfgverElement.type()),
cfgverElement.isNumber());
parsedArgs.cfgver = cfgverElement.safeNumberLong();
parsedArgs.round = cmdObj["round"].OID();
diff --git a/src/mongo/db/repl/replset_web_handler.cpp b/src/mongo/db/repl/replset_web_handler.cpp
index 6ec53363396..3f67cd9a45c 100644
--- a/src/mongo/db/repl/replset_web_handler.cpp
+++ b/src/mongo/db/repl/replset_web_handler.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/dbwebserver.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/repl_set_html_summary.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/rslog.h"
#include "mongo/util/mongoutils/html.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 3d6b8b81b43..926df8f3a62 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/reporter.h"
#include "mongo/db/repl/update_position_args.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
+#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/task_executor_proxy.h"
#include "mongo/unittest/unittest.h"
@@ -370,7 +370,8 @@ TEST_F(ReporterTestNoTriggerAtSetUp,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion" << 100));
+ << "configVersion"
+ << 100));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -389,7 +390,8 @@ TEST_F(ReporterTest, InvalidReplicaSetResponseWithSameConfigVersionOnSyncTargetS
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "invalid config"
- << "configVersion" << posUpdater->getConfigVersion()));
+ << "configVersion"
+ << posUpdater->getConfigVersion()));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -406,7 +408,8 @@ TEST_F(
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion" << posUpdater->getConfigVersion() + 1));
+ << "configVersion"
+ << posUpdater->getConfigVersion() + 1));
ASSERT_TRUE(reporter->isActive());
}
@@ -430,7 +433,8 @@ TEST_F(
commandRequest = processNetworkResponse(
BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig) << "errmsg"
<< "newer config"
- << "configVersion" << posUpdater->getConfigVersion() + 1));
+ << "configVersion"
+ << posUpdater->getConfigVersion() + 1));
ASSERT_EQUALS(expectedOldStyleCommandRequest, commandRequest);
ASSERT_TRUE(reporter->isActive());
@@ -526,7 +530,7 @@ TEST_F(ReporterTestNoTriggerAtSetUp, CommandPreparationFailureStopsTheReporter)
Status expectedStatus(ErrorCodes::UnknownError, "unknown error");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
ASSERT_OK(reporter->trigger());
ASSERT_EQUALS(expectedStatus, reporter->join());
@@ -544,7 +548,7 @@ TEST_F(ReporterTest, CommandPreparationFailureDuringRescheduleStopsTheReporter)
Status expectedStatus(ErrorCodes::UnknownError, "unknown error");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
processNetworkResponse(BSON("ok" << 1));
@@ -704,7 +708,7 @@ TEST_F(ReporterTest, KeepAliveTimeoutFailingToScheduleRemoteCommandShouldMakeRep
Status expectedStatus(ErrorCodes::UnknownError, "failed to prepare update command");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
runUntil(until);
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 674b43f969a..86797724eed 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -28,10 +28,10 @@
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/master_slave.h" // replSettings
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
namespace mongo {
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 06af9890571..87e888a62d0 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -121,8 +121,9 @@ TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
makeOpAndRecordId(2, 1), commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation =
- [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
RollBackLocalOperations finder(localOplog, rollbackOperation);
auto result = finder.onRemoteOperation(commonOperation.first);
ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
@@ -211,8 +212,9 @@ TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesRollbackOperationF
makeOpAndRecordId(1, 3), commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation =
- [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
RollBackLocalOperations finder(localOplog, rollbackOperation);
auto result = finder.onRemoteOperation(makeOp(1, 2));
ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index 7d1a710982a..cb86eb2a811 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -49,32 +49,34 @@ RollbackChecker::RollbackChecker(executor::TaskExecutor* executor, HostAndPort s
RollbackChecker::~RollbackChecker() {}
RollbackChecker::CallbackHandle RollbackChecker::checkForRollback(const CallbackFn& nextAction) {
- return _scheduleGetRollbackId([this, nextAction](const RemoteCommandCallbackArgs& args) {
- if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
- return;
- }
- if (!args.response.isOK()) {
- nextAction(args.response.getStatus());
- return;
- }
- if (auto rbidElement = args.response.getValue().data["rbid"]) {
- int remoteRBID = rbidElement.numberInt();
-
- UniqueLock lk(_mutex);
- bool hadRollback = _checkForRollback_inlock(remoteRBID);
- lk.unlock();
-
- if (hadRollback) {
- nextAction(Status(ErrorCodes::UnrecoverableRollbackError,
- "RollbackChecker detected rollback occurred"));
+ return _scheduleGetRollbackId(
+ [this, nextAction](const RemoteCommandCallbackArgs& args) {
+ if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ if (!args.response.isOK()) {
+ nextAction(args.response.getStatus());
+ return;
+ }
+ if (auto rbidElement = args.response.getValue().data["rbid"]) {
+ int remoteRBID = rbidElement.numberInt();
+
+ UniqueLock lk(_mutex);
+ bool hadRollback = _checkForRollback_inlock(remoteRBID);
+ lk.unlock();
+
+ if (hadRollback) {
+ nextAction(Status(ErrorCodes::UnrecoverableRollbackError,
+ "RollbackChecker detected rollback occurred"));
+ } else {
+ nextAction(Status::OK());
+ }
} else {
- nextAction(Status::OK());
+ nextAction(Status(ErrorCodes::CommandFailed,
+ "replSetGetRBID command failed when checking for rollback"));
}
- } else {
- nextAction(Status(ErrorCodes::CommandFailed,
- "replSetGetRBID command failed when checking for rollback"));
- }
- }, nextAction);
+ },
+ nextAction);
}
bool RollbackChecker::hasHadRollback() {
@@ -87,27 +89,29 @@ bool RollbackChecker::hasHadRollback() {
}
RollbackChecker::CallbackHandle RollbackChecker::reset(const CallbackFn& nextAction) {
- return _scheduleGetRollbackId([this, nextAction](const RemoteCommandCallbackArgs& args) {
- if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
- return;
- }
- if (!args.response.isOK()) {
- nextAction(args.response.getStatus());
- return;
- }
- if (auto rbidElement = args.response.getValue().data["rbid"]) {
- int newRBID = rbidElement.numberInt();
-
- UniqueLock lk(_mutex);
- _setRBID_inlock(newRBID);
- lk.unlock();
-
- nextAction(Status::OK());
- } else {
- nextAction(Status(ErrorCodes::CommandFailed,
- "replSetGetRBID command failed when checking for rollback"));
- }
- }, nextAction);
+ return _scheduleGetRollbackId(
+ [this, nextAction](const RemoteCommandCallbackArgs& args) {
+ if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ if (!args.response.isOK()) {
+ nextAction(args.response.getStatus());
+ return;
+ }
+ if (auto rbidElement = args.response.getValue().data["rbid"]) {
+ int newRBID = rbidElement.numberInt();
+
+ UniqueLock lk(_mutex);
+ _setRBID_inlock(newRBID);
+ lk.unlock();
+
+ nextAction(Status::OK());
+ } else {
+ nextAction(Status(ErrorCodes::CommandFailed,
+ "replSetGetRBID command failed when checking for rollback"));
+ }
+ },
+ nextAction);
}
Status RollbackChecker::reset_sync() {
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 443e242deb2..f416af8c716 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/oplogreader.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 2f31e9a63d7..fe25036f399 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -40,10 +40,9 @@
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/client.h"
#include "mongo/db/cloner.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/initial_sync.h"
@@ -52,6 +51,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/storage_interface.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/exit.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -143,7 +143,8 @@ void checkAdminDatabasePostClone(OperationContext* txn, Database* adminDb) {
<< " but could not find an auth schema version document in "
<< AuthorizationManager::versionCollectionNamespace;
severe() << "This indicates that the primary of this replica set was not successfully "
- "upgraded to schema version " << AuthorizationManager::schemaVersion26Final
+ "upgraded to schema version "
+ << AuthorizationManager::schemaVersion26Final
<< ", which is the minimum supported schema version in this version of MongoDB";
fassertFailedNoTrace(28620);
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index bb5531dbf34..0663c13e4dd 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -37,17 +37,17 @@
#include <memory>
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
@@ -454,7 +454,8 @@ void syncFixUp(OperationContext* txn,
auto status = options.parse(optionsField.Obj());
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": " << status.toString());
+ << ": "
+ << status.toString());
}
} else {
// Use default options.
@@ -467,19 +468,19 @@ void syncFixUp(OperationContext* txn,
auto status = collection->setValidator(txn, options.validator);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validator: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validator: "
+ << status.toString());
}
status = collection->setValidationAction(txn, options.validationAction);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validationAction: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validationAction: "
+ << status.toString());
}
status = collection->setValidationLevel(txn, options.validationLevel);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validationLevel: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validationLevel: "
+ << status.toString());
}
wuow.commit();
@@ -822,7 +823,8 @@ Status _syncRollback(OperationContext* txn,
if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Cannot transition from "
- << replCoord->getMemberState().toString() << " to "
+ << replCoord->getMemberState().toString()
+ << " to "
<< MemberState(MemberState::RS_ROLLBACK).toString());
}
}
@@ -833,8 +835,9 @@ Status _syncRollback(OperationContext* txn,
{
log() << "rollback 2 FindCommonPoint";
try {
- auto processOperationForFixUp =
- [&how](const BSONObj& operation) { return refetch(how, operation); };
+ auto processOperationForFixUp = [&how](const BSONObj& operation) {
+ return refetch(how, operation);
+ };
auto res = syncRollBackLocalOperations(
localOplog, rollbackSource.getOplog(), processOperationForFixUp);
if (!res.isOK()) {
@@ -856,7 +859,8 @@ Status _syncRollback(OperationContext* txn,
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< "need to rollback, but unable to determine common point between"
- " local and remote oplog: " << e.what(),
+ " local and remote oplog: "
+ << e.what(),
18752);
} catch (const DBException& e) {
warning() << "rollback 2 exception " << e.toString() << "; sleeping 1 min";
@@ -912,11 +916,9 @@ Status syncRollback(OperationContext* txn,
const OplogInterface& localOplog,
const RollbackSource& rollbackSource,
ReplicationCoordinator* replCoord) {
- return syncRollback(txn,
- localOplog,
- rollbackSource,
- replCoord,
- [](Seconds seconds) { sleepsecs(durationCount<Seconds>(seconds)); });
+ return syncRollback(txn, localOplog, rollbackSource, replCoord, [](Seconds seconds) {
+ sleepsecs(durationCount<Seconds>(seconds));
+ });
}
} // namespace repl
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 5387be5eb7c..096e3902bed 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -199,7 +199,8 @@ TEST_F(RSRollbackTest, SetFollowerModeFailed) {
RollbackSourceMock(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
_coordinator,
- noSleep).code());
+ noSleep)
+ .code());
}
TEST_F(RSRollbackTest, OplogStartMissing) {
@@ -214,7 +215,8 @@ TEST_F(RSRollbackTest, OplogStartMissing) {
operation,
}))),
_coordinator,
- noSleep).code());
+ noSleep)
+ .code());
}
TEST_F(RSRollbackTest, NoRemoteOpLog) {
@@ -307,7 +309,8 @@ int _testRollbackDelete(OperationContext* txn,
<< "d"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 0)),
+ << "o"
+ << BSON("_id" << 0)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -383,7 +386,8 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
<< "i"
<< "ns"
<< "test.t"
- << "o" << BSON("a" << 1)),
+ << "o"
+ << BSON("a" << 1)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -419,7 +423,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
- << "key" << BSON("a" << 1) << "name"
+ << "key"
+ << BSON("a" << 1)
+ << "name"
<< "a_1");
{
Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
@@ -439,7 +445,8 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << indexSpec),
+ << "o"
+ << indexSpec),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -483,7 +490,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
- << "key" << BSON("a" << 1) << "name"
+ << "key"
+ << BSON("a" << 1)
+ << "name"
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
@@ -499,7 +508,8 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << indexSpec),
+ << "o"
+ << indexSpec),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -545,8 +555,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("key" << BSON("a" << 1) << "name"
- << "a_1")),
+ << "o"
+ << BSON("key" << BSON("a" << 1) << "name"
+ << "a_1")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -587,10 +598,13 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("ns"
- << "test."
- << "key" << BSON("a" << 1) << "name"
- << "a_1")),
+ << "o"
+ << BSON("ns"
+ << "test."
+ << "key"
+ << BSON("a" << 1)
+ << "name"
+ << "a_1")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -631,9 +645,11 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("ns"
- << "test.t"
- << "key" << BSON("a" << 1))),
+ << "o"
+ << BSON("ns"
+ << "test.t"
+ << "key"
+ << BSON("a" << 1))),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -673,8 +689,9 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("unknown_command"
- << "t")),
+ << "o"
+ << BSON("unknown_command"
+ << "t")),
RecordId(2));
{
Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
@@ -705,8 +722,9 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("drop"
- << "t")),
+ << "o"
+ << BSON("drop"
+ << "t")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -787,24 +805,30 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
<< "u"
<< "ns"
<< "test.t"
- << "o2" << BSON("_id" << 1) << "o"
+ << "o2"
+ << BSON("_id" << 1)
+ << "o"
<< BSON("_id" << 1 << "v" << 2)),
BSON("op"
<< "u"
<< "ns"
<< "test.t"
- << "o2" << BSON("_id" << 2) << "o"
+ << "o2"
+ << BSON("_id" << 2)
+ << "o"
<< BSON("_id" << 2 << "v" << 4)),
BSON("op"
<< "d"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 3)),
+ << "o"
+ << BSON("_id" << 3)),
BSON("op"
<< "i"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 4))}),
+ << "o"
+ << BSON("_id" << 4))}),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
@@ -870,8 +894,9 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("create"
- << "t")),
+ << "o"
+ << BSON("create"
+ << "t")),
RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -899,9 +924,11 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("collMod"
- << "t"
- << "noPadding" << false)),
+ << "o"
+ << BSON("collMod"
+ << "t"
+ << "noPadding"
+ << false)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -940,9 +967,11 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("collMod"
- << "t"
- << "noPadding" << false)),
+ << "o"
+ << BSON("collMod"
+ << "t"
+ << "noPadding"
+ << false)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index cc8eaa320e5..455235e519c 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/fsync.h"
#include "mongo/db/commands/server_status.h"
-#include "mongo/db/curop.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/optime.h"
diff --git a/src/mongo/db/repl/rs_sync.h b/src/mongo/db/repl/rs_sync.h
index ec174268b5c..513c6265657 100644
--- a/src/mongo/db/repl/rs_sync.h
+++ b/src/mongo/db/repl/rs_sync.h
@@ -32,10 +32,10 @@
#include <vector>
#include "mongo/db/client.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/initial_sync.h"
#include "mongo/db/repl/sync_tail.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/util/concurrency/old_thread_pool.h"
namespace mongo {
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ef9935ae86f..3f3267158d8 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -154,7 +154,8 @@ void StorageInterfaceImpl::setMinValid(OperationContext* txn,
txn,
_minValidNss.ns().c_str(),
BSON("$set" << BSON("ts" << endOpTime.getTimestamp() << "t" << endOpTime.getTerm())
- << "$unset" << BSON(kBeginFieldName << 1)));
+ << "$unset"
+ << BSON(kBeginFieldName << 1)));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
txn, "StorageInterfaceImpl::setMinValid", _minValidNss.ns());
@@ -174,7 +175,8 @@ void StorageInterfaceImpl::setMinValid(OperationContext* txn, const BatchBoundar
Helpers::putSingleton(txn,
_minValidNss.ns().c_str(),
BSON("$set" << BSON("ts" << end.getTimestamp() << "t" << end.getTerm()
- << kBeginFieldName << start.toBSON())));
+ << kBeginFieldName
+ << start.toBSON())));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
txn, "StorageInterfaceImpl::setMinValid", _minValidNss.ns());
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 411b3fb0133..ed05b1fc48b 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -33,11 +33,11 @@
#include "mongo/db/repl/sync_source_feedback.h"
#include "mongo/db/client.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/reporter.h"
-#include "mongo/db/operation_context.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/network_interface_thread_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
@@ -75,25 +75,24 @@ Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx)
*/
Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePositionCommandFn(
OperationContext* txn, stdx::mutex& mtx, const HostAndPort& syncTarget) {
- return [&mtx, syncTarget, txn](
- ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> {
- auto currentSyncTarget = BackgroundSync::get()->getSyncTarget();
- if (currentSyncTarget != syncTarget) {
- // Change in sync target
- return Status(ErrorCodes::InvalidSyncSource, "Sync target is no longer valid");
- }
+ return [&mtx, syncTarget, txn](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle
+ commandStyle) -> StatusWith<BSONObj> {
+ auto currentSyncTarget = BackgroundSync::get()->getSyncTarget();
+ if (currentSyncTarget != syncTarget) {
+ // Change in sync target
+ return Status(ErrorCodes::InvalidSyncSource, "Sync target is no longer valid");
+ }
- stdx::lock_guard<stdx::mutex> lock(mtx);
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- if (replCoord->getMemberState().primary()) {
- // Primary has no one to send updates to.
- return Status(ErrorCodes::InvalidSyncSource,
- "Currently primary - no one to send updates to");
- }
+ stdx::lock_guard<stdx::mutex> lock(mtx);
+ auto replCoord = repl::ReplicationCoordinator::get(txn);
+ if (replCoord->getMemberState().primary()) {
+ // Primary has no one to send updates to.
+ return Status(ErrorCodes::InvalidSyncSource,
+ "Currently primary - no one to send updates to");
+ }
- return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
- };
+ return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
+ };
}
} // namespace
@@ -254,7 +253,8 @@ void SyncSourceFeedback::run() {
auto status = _updateUpstream(txn.get());
if (!status.isOK()) {
LOG(1) << "The replication progress command (replSetUpdatePosition) failed and will be "
- "retried: " << status;
+ "retried: "
+ << status;
}
}
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index ebaff27d29c..d8bcb020c50 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -33,9 +33,9 @@
#include "mongo/db/repl/sync_tail.h"
+#include "third_party/murmurhash3/MurmurHash3.h"
#include <boost/functional/hash.hpp>
#include <memory>
-#include "third_party/murmurhash3/MurmurHash3.h"
#include "mongo/base/counter.h"
#include "mongo/db/auth/authorization_session.h"
@@ -749,7 +749,8 @@ void SyncTail::oplogApplication() {
str::stream() << "Attempted to apply an oplog entry ("
<< lastOpTime.toString()
<< ") which is not greater than our lastWrittenOptime ("
- << lastWriteOpTime.toString() << ")."));
+ << lastWriteOpTime.toString()
+ << ")."));
}
handleSlaveDelay(lastOpTime.getTimestamp());
@@ -1029,9 +1030,7 @@ void multiSyncApply(const std::vector<OplogEntry>& ops, SyncTail*) {
int batchSize = 0;
int batchCount = 0;
auto endOfGroupableOpsIterator = std::find_if(
- oplogEntriesIterator + 1,
- oplogEntryPointers.end(),
- [&](OplogEntry* nextEntry) {
+ oplogEntriesIterator + 1, oplogEntryPointers.end(), [&](OplogEntry* nextEntry) {
return nextEntry->opType[0] != 'i' || // Must be an insert.
nextEntry->ns != entry->ns || // Must be the same namespace.
// Must not create too large an object.
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index f81852dad0f..087889ed5ac 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -160,11 +160,11 @@ TEST_F(SyncTailTest, SyncApplyNoOp) {
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -188,11 +188,11 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
}
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_EQUALS(5, applyOpCalled);
}
@@ -219,11 +219,11 @@ void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
ASSERT_TRUE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, applyCmd, _incOps));
@@ -279,11 +279,11 @@ TEST_F(SyncTailTest, SyncApplyIndexBuild) {
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -304,16 +304,16 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
FAIL("applyOperation unexpectedly invoked.");
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- applyCmdCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isW());
- ASSERT_TRUE(txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(txn));
- ASSERT_EQUALS(op, theOperation);
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ applyCmdCalled = true;
+ ASSERT_TRUE(txn);
+ ASSERT_TRUE(txn->lockState()->isW());
+ ASSERT_TRUE(txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(txn));
+ ASSERT_EQUALS(op, theOperation);
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -335,14 +335,14 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
FAIL("applyOperation unexpectedly invoked.");
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- applyCmdCalled++;
- if (applyCmdCalled < 5) {
- throw WriteConflictException();
- }
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ applyCmdCalled++;
+ if (applyCmdCalled < 5) {
+ throw WriteConflictException();
+ }
+ return Status::OK();
+ };
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_EQUALS(5, applyCmdCalled);
ASSERT_EQUALS(1U, _opsApplied);
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 1558e88929a..5cb02e05c0e 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -43,8 +43,8 @@
#include "mongo/util/concurrency/old_thread_pool.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/destructor_guard.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index 27242c393d8..1548cb774a9 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <iosfwd>
+#include <string>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 1cfaee288d4..b72fe47f524 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/repl/repl_set_request_votes_args.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/rslog.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
+#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -466,14 +466,16 @@ void TopologyCoordinatorImpl::prepareFreshResponse(
*result =
Status(ErrorCodes::ReplicaSetNotFound,
str::stream() << "Wrong repl set name. Expected: " << _rsConfig.getReplSetName()
- << ", received: " << args.setName);
+ << ", received: "
+ << args.setName);
return;
}
if (args.id == static_cast<unsigned>(_selfConfig().getId())) {
*result = Status(ErrorCodes::BadValue,
str::stream() << "Received replSetFresh command from member with the "
- "same member ID as ourself: " << args.id);
+ "same member ID as ourself: "
+ << args.id);
return;
}
@@ -606,7 +608,8 @@ void TopologyCoordinatorImpl::prepareElectResponse(
} else if (myver > args.cfgver) {
// they are stale!
log() << "replSetElect command received stale config version # during election. "
- "Our version: " << myver << ", their version: " << args.cfgver;
+ "Our version: "
+ << myver << ", their version: " << args.cfgver;
vote = -10000;
} else if (!hopeful) {
log() << "replSetElect couldn't find member with id " << args.whoid;
@@ -666,7 +669,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponse(Date_t now,
response->noteMismatched();
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb << " reported by remote node");
+ << rshb
+ << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -680,7 +684,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponse(Date_t now,
if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
+ "member ID as ourself: "
+ << args.getSenderId());
}
}
@@ -752,7 +757,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(Date_t now,
<< "; remote node's: " << rshb;
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb << " reported by remote node");
+ << rshb
+ << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -765,7 +771,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(Date_t now,
if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
+ "member ID as ourself: "
+ << args.getSenderId());
}
}
@@ -988,7 +995,8 @@ HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: " << _rsConfig.toBSON();
+ " current config: "
+ << _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
@@ -1001,7 +1009,8 @@ HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
if (!hbResponse.isOK()) {
if (isUnauthorized) {
LOG(1) << "setAuthIssue: heartbeat response failed due to authentication"
- " issue for member _id:" << member.getId();
+ " issue for member _id:"
+ << member.getId();
hbData.setAuthIssue(now);
} else if (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries ||
alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriod()) {
@@ -1319,7 +1328,8 @@ Status TopologyCoordinatorImpl::checkShouldStandForElection(Date_t now,
return {ErrorCodes::NodeNotElectable,
str::stream() << "Not standing for election because "
<< _getUnelectableReasonString(unelectableReason)
- << "; my last optime is " << lastOpApplied.toString()
+ << "; my last optime is "
+ << lastOpApplied.toString()
<< " and the newest is "
<< _latestKnownOpTime(lastOpApplied).toString()};
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index 6d1e9f08a97..030142c628b 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -244,12 +244,15 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -309,26 +312,41 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
<< "h2"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 30 << "host"
<< "h3"
- << "hidden" << true << "priority" << 0 << "votes"
- << 0) << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly" << true)
+ << "hidden"
+ << true
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 50 << "host"
<< "h5"
- << "slaveDelay" << 1 << "priority" << 0)
+ << "slaveDelay"
+ << 1
+ << "priority"
+ << 0)
<< BSON("_id" << 60 << "host"
- << "h6") << BSON("_id" << 70 << "host"
- << "hprimary"))),
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -460,13 +478,17 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -512,12 +534,11 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
}
TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:10, host:'hself'}, "
- "{_id:20, host:'h2', votes:0, priority:0}, "
- "{_id:30, host:'h3'} "
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:10, host:'hself'}, "
+ "{_id:20, host:'h2', votes:0, priority:0}, "
+ "{_id:30, host:'h3'} "
+ "]}"),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -550,12 +571,15 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -597,12 +621,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -650,12 +677,15 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -699,13 +729,17 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -750,12 +784,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -814,18 +851,22 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
-1);
ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"),
"rs0",
MemberState::RS_SECONDARY,
OpTime(Timestamp(1, 0), 0),
- Milliseconds(300)).getAction());
+ Milliseconds(300))
+ .getAction());
}
TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunPriorToHavingAConfig) {
@@ -852,10 +893,13 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -874,21 +918,29 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
// Try to sync while PRIMARY
@@ -911,21 +963,29 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -944,21 +1004,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -977,21 +1045,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1011,21 +1087,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1045,21 +1129,29 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1080,21 +1172,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1119,21 +1219,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1159,21 +1267,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1198,21 +1314,29 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1233,12 +1357,11 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenAskedToSyncFromANonVoterAsAVo
BSONObjBuilder response;
// Test trying to sync from another node
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:0, host:'self'},"
- "{_id:1, host:'h1'},"
- "{_id:2, host:'h2', votes:0, priority:0}"
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:0, host:'self'},"
+ "{_id:1, host:'h1'},"
+ "{_id:2, host:'h2', votes:0, priority:0}"
+ "]}"),
0);
getTopoCoord().prepareSyncFromResponse(HostAndPort("h2"), ourOpTime, &response, &result);
@@ -1257,21 +1380,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1328,17 +1459,17 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
hb.setDurableOpTime(oplogDurable);
StatusWith<ReplSetHeartbeatResponse> hbResponseGood = StatusWith<ReplSetHeartbeatResponse>(hb);
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234")
- << BSON("_id" << 3 << "host"
- << "test3:1234"))),
- 3,
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234")
+ << BSON("_id" << 3 << "host"
+ << "test3:1234"))),
+ 3,
+ startupTime + Milliseconds(1));
// Now that the replica set is setup, put the members into the states we want them in.
HostAndPort member = HostAndPort("test0:1234");
@@ -1467,15 +1598,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
OpTime oplogProgress(Timestamp(3, 4), 0);
std::string setName = "mySet";
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234"))),
- -1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234"))),
+ -1, // This one is not part of the replica set.
+ startupTime + Milliseconds(1));
BSONObjBuilder statusBuilder;
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
@@ -1519,16 +1650,21 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1551,16 +1687,21 @@ TEST_F(TopoCoordTest, NodeReturnsFresherWhenFreshnessIsCheckedWithStaleConfigVer
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1592,16 +1733,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWithAMemberWhoIsNotInTheConfig)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1632,16 +1778,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWhilePrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1677,16 +1828,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWhilePrimaryExists) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1725,16 +1881,21 @@ TEST_F(TopoCoordTest, NodeReturnsNotFreshestWhenFreshnessIsCheckedByALowPriority
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1772,16 +1933,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeWeBelieveToBeDown) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1822,16 +1988,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1871,16 +2042,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsInStartup) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1918,16 +2094,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsRecovering) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1966,16 +2147,21 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
// Test trying to elect a node that is fresher but lower priority than the existing primary
args.setName = "rs0";
@@ -2010,16 +2196,21 @@ TEST_F(TopoCoordTest, RespondPositivelyWhenFreshnessIsCheckedByAnElectableNode)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -2055,16 +2246,21 @@ TEST_F(TopoCoordTest, NodeReturnsBadValueWhenFreshnessIsCheckedByANodeWithOurID)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -2088,11 +2284,10 @@ TEST_F(TopoCoordTest, NodeReturnsBadValueWhenFreshnessIsCheckedByANodeWithOurID)
TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter) {
// This tests that arbiters issue heartbeats at electionTimeout/2 frequencies
TopoCoordTest::setUp();
- updateConfig(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}], "
- "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
+ updateConfig(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}], "
+ "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
0);
HostAndPort target("host2", 27017);
Date_t requestDate = now();
@@ -2112,17 +2307,20 @@ class HeartbeatResponseTest : public TopoCoordTest {
public:
virtual void setUp() {
TopoCoordTest::setUp();
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
}
};
@@ -2338,13 +2536,16 @@ TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
ReplicaSetConfig originalConfig;
originalConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
- << "host3:27017")) << "settings"
+ << "host3:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)));
ReplSetHeartbeatResponse sameConfigResponse;
@@ -2399,7 +2600,9 @@ TEST_F(HeartbeatResponseTestOneRetry, ReconfigWhenHeartbeatResponseContainsAConf
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2407,7 +2610,8 @@ TEST_F(HeartbeatResponseTestOneRetry, ReconfigWhenHeartbeatResponseContainsAConf
<< BSON("_id" << 2 << "host"
<< "host3:27017")
<< BSON("_id" << 3 << "host"
- << "host4:27017")) << "settings"
+ << "host4:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5))));
ASSERT_OK(newConfig.validate());
@@ -2580,7 +2784,9 @@ TEST_F(HeartbeatResponseTestTwoRetries, ReconfigWhenHeartbeatResponseContainsACo
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2588,7 +2794,8 @@ TEST_F(HeartbeatResponseTestTwoRetries, ReconfigWhenHeartbeatResponseContainsACo
<< BSON("_id" << 2 << "host"
<< "host3:27017")
<< BSON("_id" << 3 << "host"
- << "host4:27017")) << "settings"
+ << "host4:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5))));
ASSERT_OK(newConfig.validate());
@@ -2913,18 +3120,22 @@ TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownPrimaryForHighPriorityF
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and similarly fresh node ("host3"). However, since the coordinator's node
// (host1) is not the higher priority node, it takes no action.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime();
@@ -2957,18 +3168,22 @@ TEST_F(
//
// Despite having stepped down, we should remain electable, in order to dissuade lower
// priority nodes from standing for election.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
@@ -3000,18 +3215,22 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStepDownSelfWhenHeartbeatResponseContainsALessFreshHigherPriorityNode) {
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -3028,18 +3247,22 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStepDownRemoteWhenHeartbeatResponseContainsALessFreshHigherPriorityNode) {
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -3107,17 +3330,20 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeHaveZeroPriority) {
setSelfMemberState(MemberState::RS_SECONDARY);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
OpTime election = OpTime(Timestamp(400, 0), 0);
OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
@@ -3238,17 +3464,20 @@ TEST_F(HeartbeatResponseTest,
TEST_F(HeartbeatResponseTest,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "arbiterOnly" << true)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
OpTime election = OpTime(Timestamp(400, 0), 0);
OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
@@ -3693,25 +3922,40 @@ TEST_F(HeartbeatResponseTest,
StartElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 6 << "host"
- << "host7:27017")) << "settings"
+ << "host7:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3793,16 +4037,21 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
- << "h1") << BSON("_id" << 2 << "host"
- << "h2"
- << "priority" << 10)
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority"
+ << 10)
<< BSON("_id" << 3 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
}
@@ -4079,9 +4328,8 @@ TEST_F(PrepareElectResponseTest,
ASSERT_EQUALS(0, response2["vote"].Int());
ASSERT_EQUALS(round, response2["round"].OID());
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "voting no for h3:27017; "
- "voted for h2:27017 0 secs ago"));
+ countLogLinesContaining("voting no for h3:27017; "
+ "voted for h2:27017 0 secs ago"));
// Test that after enough time passes the same vote can proceed
now += Seconds(30) + Milliseconds(1); // just over 30 seconds later
@@ -4100,7 +4348,9 @@ TEST_F(PrepareElectResponseTest,
TEST_F(TopoCoordTest, NodeReturnsReplicaSetNotFoundWhenReceivingElectCommandWhileRemoved) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -4109,7 +4359,9 @@ TEST_F(TopoCoordTest, NodeReturnsReplicaSetNotFoundWhenReceivingElectCommandWhil
// Reconfig to remove self.
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4140,7 +4392,9 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -4211,8 +4465,11 @@ TEST_F(TopoCoordTest,
UnfreezeImmediatelyWhenToldToFreezeForZeroSecondsAfterBeingToldToFreezeForLonger) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -4231,12 +4488,15 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
}
@@ -4296,9 +4556,8 @@ TEST_F(PrepareHeartbeatResponseTest,
ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
<< result.reason() << '"';
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
+ countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
ASSERT_TRUE(response.isMismatched());
ASSERT_EQUALS("", response.getHbMsg());
}
@@ -4541,8 +4800,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4559,10 +4821,13 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4575,8 +4840,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
}
@@ -4587,10 +4855,13 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4608,7 +4879,9 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4619,16 +4892,18 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
// reconfig to add to set
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
// having been added to the config, we should no longer be REMOVED and should enter STARTUP2
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4637,23 +4912,27 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4669,8 +4948,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4685,7 +4967,9 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4701,8 +4985,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4715,17 +5002,20 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// now lose primary due to loss of electability
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -4735,8 +5025,11 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
@@ -4751,38 +5044,45 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Now reconfig in ways that leave us electable and ensure we are still the primary.
// Add hosts
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// Change priorities and tags
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 10)
- << BSON("_id" << 1 << "host"
- << "host2:27017"
- << "priority" << 5 << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rack1")))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 10)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"
+ << "priority"
+ << 5
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rack1")))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
}
@@ -4790,7 +5090,9 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -4802,16 +5104,18 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
// reconfig and stay secondary
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -4845,7 +5149,9 @@ TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -4894,7 +5200,9 @@ TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepso
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -5148,14 +5456,19 @@ TEST_F(HeartbeatResponseTest, ShouldNotChangeSyncSourceWhenFresherMemberDoesNotB
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 6 << "members"
+ << "version"
+ << 6
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -5189,15 +5502,23 @@ TEST_F(HeartbeatResponseTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -5225,12 +5546,15 @@ TEST_F(HeartbeatResponseTest,
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5244,12 +5568,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileTooStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5264,12 +5591,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileTooStale) {
TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
@@ -5278,13 +5608,17 @@ TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
}
@@ -5299,12 +5633,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhenAPositiveResponseWasGivenInTheVoteLeasePeriod) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
heartbeatFromMember(
@@ -5339,20 +5676,28 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhenAPositiveResponseWasGivenInTh
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5362,11 +5707,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL << "configVersion" << 1LL
- << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -5378,12 +5729,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5391,8 +5745,14 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5404,12 +5764,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << true << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -5420,12 +5787,15 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5433,8 +5803,14 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << false << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5446,12 +5822,19 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -5462,12 +5845,15 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5475,8 +5861,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5489,12 +5880,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5502,8 +5896,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 0LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5516,12 +5915,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5533,8 +5935,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5548,12 +5955,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5562,8 +5972,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 3LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
@@ -5576,12 +5991,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5589,12 +6007,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5608,8 +6031,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5623,12 +6052,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5636,12 +6068,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5655,8 +6092,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 1LL << "configVersion" << 0LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5670,12 +6113,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5683,12 +6129,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5701,8 +6152,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 0LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 0LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5716,12 +6173,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5729,12 +6189,17 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5748,8 +6213,14 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5763,12 +6234,15 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5776,12 +6250,17 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5795,8 +6274,14 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 3LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5818,12 +6303,17 @@ TEST_F(TopoCoordTest, CSRSConfigServerRejectsPV0Config) {
auto configObj = BSON("_id"
<< "rs0"
- << "version" << 1 << "configsvr" << true << "members"
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3")));
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3")));
ReplicaSetConfig config;
ASSERT_OK(config.initialize(configObj, false));
ASSERT_EQ(ErrorCodes::BadValue, config.validate());
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 826905a860a..9e04bf8942d 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -255,12 +255,15 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -320,26 +323,41 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
<< "h2"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 30 << "host"
<< "h3"
- << "hidden" << true << "priority" << 0 << "votes"
- << 0) << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly" << true)
+ << "hidden"
+ << true
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 50 << "host"
<< "h5"
- << "slaveDelay" << 1 << "priority" << 0)
+ << "slaveDelay"
+ << 1
+ << "priority"
+ << 0)
<< BSON("_id" << 60 << "host"
- << "h6") << BSON("_id" << 70 << "host"
- << "hprimary"))),
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -471,13 +489,17 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -523,12 +545,11 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
}
TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:10, host:'hself'}, "
- "{_id:20, host:'h2', votes:0, priority:0}, "
- "{_id:30, host:'h3'} "
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:10, host:'hself'}, "
+ "{_id:20, host:'h2', votes:0, priority:0}, "
+ "{_id:30, host:'h3'} "
+ "]}"),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -561,12 +582,15 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -608,12 +632,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -651,12 +678,15 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -700,13 +730,17 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -751,12 +785,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -815,18 +852,22 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
-1);
ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"),
"rs0",
MemberState::RS_SECONDARY,
OpTime(Timestamp(1, 0), 0),
- Milliseconds(300)).getAction());
+ Milliseconds(300))
+ .getAction());
}
TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunPriorToHavingAConfig) {
@@ -853,10 +894,13 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -875,21 +919,29 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
// Try to sync while PRIMARY
@@ -912,21 +964,29 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -945,21 +1005,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -978,21 +1046,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1012,21 +1088,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1046,21 +1130,29 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1081,21 +1173,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1120,21 +1220,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1160,21 +1268,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1199,21 +1315,29 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1234,12 +1358,11 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenAskedToSyncFromANonVoterAsAVo
BSONObjBuilder response;
// Test trying to sync from another node
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:0, host:'self'},"
- "{_id:1, host:'h1'},"
- "{_id:2, host:'h2', votes:0, priority:0}"
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:0, host:'self'},"
+ "{_id:1, host:'h1'},"
+ "{_id:2, host:'h2', votes:0, priority:0}"
+ "]}"),
0);
getTopoCoord().prepareSyncFromResponse(HostAndPort("h2"), ourOpTime, &response, &result);
@@ -1258,21 +1381,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1329,17 +1460,17 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
hb.setAppliedOpTime(oplogProgress);
StatusWith<ReplSetHeartbeatResponse> hbResponseGood = StatusWith<ReplSetHeartbeatResponse>(hb);
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234")
- << BSON("_id" << 3 << "host"
- << "test3:1234"))),
- 3,
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234")
+ << BSON("_id" << 3 << "host"
+ << "test3:1234"))),
+ 3,
+ startupTime + Milliseconds(1));
// Now that the replica set is setup, put the members into the states we want them in.
HostAndPort member = HostAndPort("test0:1234");
@@ -1468,15 +1599,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
OpTime oplogProgress(Timestamp(3, 4), 0);
std::string setName = "mySet";
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234"))),
- -1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234"))),
+ -1, // This one is not part of the replica set.
+ startupTime + Milliseconds(1));
BSONObjBuilder statusBuilder;
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
@@ -1497,11 +1628,10 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter) {
// This tests that arbiters issue heartbeats at electionTimeout/2 frequencies
TopoCoordTest::setUp();
- updateConfig(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}], "
- "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
+ updateConfig(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}], "
+ "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
0);
HostAndPort target("host2", 27017);
Date_t requestDate = now();
@@ -1523,12 +1653,16 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3")) << "settings"
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "settings"
<< BSON("protocolVersion" << 1)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1558,9 +1692,8 @@ TEST_F(PrepareHeartbeatResponseV1Test,
ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
<< result.reason() << '"';
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
+ countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
// only protocolVersion should be set in this failure case
ASSERT_EQUALS("", response.getReplicaSetName());
}
@@ -1570,11 +1703,15 @@ TEST_F(PrepareHeartbeatResponseV1Test,
// reconfig self out of set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 3 << "members" << BSON_ARRAY(BSON("_id" << 20 << "host"
- << "h2")
- << BSON("_id" << 30 << "host"
- << "h3"))
- << "settings" << BSON("protocolVersion" << 1)),
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 20 << "host"
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("protocolVersion" << 1)),
-1);
ReplSetHeartbeatArgsV1 args;
args.setSetName("rs0");
@@ -1764,8 +1901,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1782,10 +1922,13 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1798,8 +1941,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
}
@@ -1810,10 +1956,13 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1831,7 +1980,9 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1842,16 +1993,18 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
// reconfig to add to set
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
// having been added to the config, we should no longer be REMOVED and should enter STARTUP2
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1860,23 +2013,27 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1892,8 +2049,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1908,7 +2068,9 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1924,8 +2086,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1938,17 +2103,20 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// now lose primary due to loss of electability
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -1958,8 +2126,11 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
@@ -1974,38 +2145,45 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Now reconfig in ways that leave us electable and ensure we are still the primary.
// Add hosts
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// Change priorities and tags
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 10)
- << BSON("_id" << 1 << "host"
- << "host2:27017"
- << "priority" << 5 << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rack1")))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 10)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"
+ << "priority"
+ << 5
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rack1")))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
}
@@ -2013,7 +2191,9 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -2025,16 +2205,18 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
// reconfig and stay secondary
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -2042,12 +2224,15 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2059,12 +2244,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
TEST_F(TopoCoordTest, ShouldStandForElectionDespiteNotCloseEnoughToLastOptime) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2076,12 +2264,15 @@ TEST_F(TopoCoordTest, ShouldStandForElectionDespiteNotCloseEnoughToLastOptime) {
TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
@@ -2090,13 +2281,17 @@ TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
}
@@ -2111,20 +2306,28 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2134,11 +2337,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL << "configVersion" << 1LL
- << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -2150,12 +2359,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2163,8 +2375,14 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2176,12 +2394,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << true << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -2190,12 +2415,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// real request fine
ReplSetRequestVotesArgs args3;
- args3.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args3.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response3;
getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
@@ -2204,12 +2436,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// dry post real, fails
ReplSetRequestVotesArgs args4;
- args4.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args4.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response4;
getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
@@ -2220,12 +2459,15 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2233,8 +2475,14 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << false << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2246,12 +2494,19 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -2262,12 +2517,15 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2275,8 +2533,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2289,12 +2552,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2302,8 +2568,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 0LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2316,12 +2587,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2333,8 +2607,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2348,12 +2627,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2362,8 +2644,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 3LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
@@ -2376,12 +2663,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2389,12 +2679,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2408,8 +2703,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2423,12 +2724,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2436,12 +2740,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2455,8 +2764,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 1LL << "configVersion" << 0LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2470,12 +2785,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2483,12 +2801,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2501,8 +2824,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 0LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 0LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2516,12 +2845,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2529,12 +2861,17 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2548,8 +2885,14 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2563,12 +2906,15 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2576,12 +2922,17 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2595,8 +2946,14 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 3LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2618,13 +2975,19 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
}
@@ -2639,13 +3002,19 @@ TEST_F(TopoCoordTest, NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndC
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2657,17 +3026,22 @@ class HeartbeatResponseTestV1 : public TopoCoordTest {
public:
virtual void setUp() {
TopoCoordTest::setUp();
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
}
};
@@ -2683,15 +3057,23 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -3017,11 +3399,15 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017"))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"))
+ << "protocolVersion"
+ << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3066,11 +3452,15 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3131,14 +3521,20 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleAPriorityTakeoverWhenElectableAndReceiveHeartbeatFromLowerPriorityPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 6 << "host"
- << "host7:27017"))
- << "protocolVersion" << 1 << "settings"
+ << "host2:27017")
+ << BSON("_id" << 6 << "host"
+ << "host7:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3162,15 +3558,21 @@ TEST_F(HeartbeatResponseTestV1,
TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0:27017"
- << "priority" << 2)
- << BSON("_id" << 1 << "host"
- << "host1:27017"
- << "priority" << 3)
- << BSON("_id" << 2 << "host"
- << "host2:27017"))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0:27017"
+ << "priority"
+ << 2)
+ << BSON("_id" << 1 << "host"
+ << "host1:27017"
+ << "priority"
+ << 3)
+ << BSON("_id" << 2 << "host"
+ << "host2:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3223,26 +3625,43 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 6 << "host"
- << "host7:27017")) << "protocolVersion" << 1
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ << "host7:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3323,14 +3742,19 @@ TEST_F(HeartbeatResponseTestV1,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -3455,14 +3879,19 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -3553,18 +3982,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and stale node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -3585,18 +4020,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and stale node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -3614,18 +4055,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and equally fresh node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
@@ -3644,18 +4091,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and similarly fresh node ("host3"). It responds with NoAction, as it should
// in all multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime();
@@ -3811,15 +4264,21 @@ TEST_F(HeartbeatResponseTestV1, ShouldNotChangeSyncSourceWhenFresherMemberDoesNo
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 6 << "members"
+ << "version"
+ << 6
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))
- << "protocolVersion" << 1),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))
+ << "protocolVersion"
+ << 1),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -4184,14 +4643,18 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
ReplicaSetConfig originalConfig;
originalConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion" << 1 << "settings"
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)));
ReplSetHeartbeatResponse sameConfigResponse;
@@ -4212,9 +4675,8 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
stopCapturingLogMessages();
ASSERT_NO_ACTION(action.getAction());
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Config from heartbeat response was "
- "same as ours."));
+ countLogLinesContaining("Config from heartbeat response was "
+ "same as ours."));
}
TEST_F(HeartbeatResponseHighVerbosityTestV1,
diff --git a/src/mongo/db/repl/update_position_args.cpp b/src/mongo/db/repl/update_position_args.cpp
index 6fa63988a45..80f3505b2cc 100644
--- a/src/mongo/db/repl/update_position_args.cpp
+++ b/src/mongo/db/repl/update_position_args.cpp
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/util/bson_check.h"
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/repl/bson_extract_optime.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index c6e1ebe7b61..ba4691019d8 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -32,9 +32,9 @@
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/repl/repl_set_request_votes_args.h"
#include "mongo/db/repl/replication_executor.h"
+#include "mongo/db/repl/vote_requester.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/stdx/functional.h"
#include "mongo/unittest/unittest.h"
@@ -58,22 +58,29 @@ class VoteRequesterTest : public mongo::unittest::Test {
public:
virtual void setUp() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -184,22 +191,29 @@ class VoteRequesterDryRunTest : public VoteRequesterTest {
public:
virtual void setUp() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index de1dbf6487c..8f24954949b 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -37,8 +37,8 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index_legacy.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_legacy.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 9ea761808ac..e3abdbb9354 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -44,10 +44,10 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/service_context.h"
+#include "mongo/db/s/chunk_move_write_concern_options.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/chunk_move_write_concern_options.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/migration_secondary_throttle_options.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index cc0983eeba5..5c16ace947b 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -270,7 +270,8 @@ StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneMerge(
if (!validStartEnd || !validNoHoles) {
return {ErrorCodes::IllegalOperation,
stream() << "cannot merge range " << rangeToString(minKey, maxKey)
- << ", overlapping chunks " << overlapToString(overlap)
+ << ", overlapping chunks "
+ << overlapToString(overlap)
<< (!validStartEnd ? " do not have the same min and max key"
: " are not all adjacent")};
}
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c1ee18605b7..b6861350ed8 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -116,7 +116,8 @@ TEST_F(NoChunkFixture, IsKeyValid) {
ASSERT_TRUE(getCollMetadata().isValidKey(BSON("a" << 3)));
ASSERT_FALSE(getCollMetadata().isValidKey(BSON("a"
<< "abcde"
- << "b" << 1)));
+ << "b"
+ << 1)));
ASSERT_FALSE(getCollMetadata().isValidKey(BSON("c"
<< "abcde")));
}
@@ -333,10 +334,12 @@ protected:
BSONObj fooSingle = BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000"));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000"));
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
@@ -587,10 +590,12 @@ protected:
BSONObj fooSingle = BSON(
ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY << "b" << MINKEY))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << MINKEY << "b" << MINKEY))
<< ChunkType::max(BSON("a" << MAXKEY << "b" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000"));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000"));
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
@@ -654,16 +659,20 @@ protected:
std::vector<BSONObj> chunksToSend;
chunksToSend.push_back(BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 10 << "b" << 0))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 10 << "b" << 0))
<< ChunkType::max(BSON("a" << 20 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000")));
chunksToSend.push_back(BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 30 << "b" << 0))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 30 << "b" << 0))
<< ChunkType::max(BSON("a" << 40 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000")));
auto future = launchAsync([this] {
MetadataLoader loader;
@@ -847,30 +856,36 @@ protected:
ChunkVersion version(1, 1, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_MinKey")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << MINKEY))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << MINKEY))
<< ChunkType::max(BSON("a" << 10))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
{
ChunkVersion version(1, 3, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_10")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
{
ChunkVersion version(1, 2, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_30")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << 30))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << 30))
<< ChunkType::max(BSON("a" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
auto future = launchAsync([this] {
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index b4c924034a0..bccdf9cf009 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -131,11 +131,11 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* txn) co
ChunkVersion received;
ChunkVersion wanted;
if (!_checkShardVersionOk(txn, &errmsg, &received, &wanted)) {
- throw SendStaleConfigException(_nss.ns(),
- str::stream() << "[" << _nss.ns()
- << "] shard version not ok: " << errmsg,
- received,
- wanted);
+ throw SendStaleConfigException(
+ _nss.ns(),
+ str::stream() << "[" << _nss.ns() << "] shard version not ok: " << errmsg,
+ received,
+ wanted);
}
}
@@ -246,8 +246,8 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
// Set migration critical section on operation sharding state: operation will wait for the
// migration to finish before returning failure and retrying.
- OperationShardingState::get(txn)
- .setMigrationCriticalSection(_sourceMgr->getMigrationCriticalSection());
+ OperationShardingState::get(txn).setMigrationCriticalSection(
+ _sourceMgr->getMigrationCriticalSection());
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index 16b2c903b49..c12d4395f7f 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -34,12 +34,12 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/service_context_noop.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/db/service_context_noop.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index c0756a8f29b..67ea0317db0 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -71,7 +71,8 @@ protected:
void expectFindOnConfigSendChunksDefault() {
BSONObj chunk = BSON(
ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << MINKEY))
<< ChunkType::max(BSON("a" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
<< ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 542c61dc8ce..a36feb23ebf 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -49,8 +49,8 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/util/elapsed_tracker.h"
#include "mongo/util/log.h"
@@ -514,7 +514,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for " << _args.getNss().ns()};
+ << " in storeCurrentLocs for "
+ << _args.getNss().ns()};
}
// Install the stage, which will listen for notifications on the collection
@@ -604,10 +605,19 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes() << ", average document size is "
- << avgRecSize << ". Found " << recCount << " documents in chunk "
- << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
+ << maxRecsWhenFull
+ << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes()
+ << ", average document size is "
+ << avgRecSize
+ << ". Found "
+ << recCount
+ << " documents in chunk "
+ << " ns: "
+ << _args.getNss().ns()
+ << " "
+ << _args.getMinKey()
+ << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index eb72772db1e..7f7850cb22c 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -38,9 +38,9 @@
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_source_manager.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
/**
* This file contains commands, which are specific to the legacy chunk cloner source.
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 36592d6bd2e..e6990087018 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -50,13 +50,13 @@
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/move_timing_helper.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/logger/ramlog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_key_pattern.h"
@@ -260,8 +260,14 @@ Status MigrationDestinationManager::start(const string& ns,
if (_sessionId) {
return Status(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Active migration already in progress "
- << "ns: " << _ns << ", from: " << _from << ", min: " << _min
- << ", max: " << _max);
+ << "ns: "
+ << _ns
+ << ", from: "
+ << _from
+ << ", min: "
+ << _min
+ << ", max: "
+ << _max);
}
_state = READY;
@@ -978,9 +984,16 @@ Status MigrationDestinationManager::_notePending(OperationContext* txn,
if (!metadata || metadata->getCollVersion().epoch() != epoch) {
return {ErrorCodes::StaleShardVersion,
str::stream() << "could not note chunk "
- << "[" << min << "," << max << ")"
- << " as pending because the epoch for " << nss.ns()
- << " has changed from " << epoch << " to "
+ << "["
+ << min
+ << ","
+ << max
+ << ")"
+ << " as pending because the epoch for "
+ << nss.ns()
+ << " has changed from "
+ << epoch
+ << " to "
<< (metadata ? metadata->getCollVersion().epoch()
: ChunkVersion::UNSHARDED().epoch())};
}
@@ -1023,10 +1036,18 @@ Status MigrationDestinationManager::_forgetPending(OperationContext* txn,
if (!metadata || metadata->getCollVersion().epoch() != epoch) {
return {ErrorCodes::StaleShardVersion,
str::stream() << "no need to forget pending chunk "
- << "[" << min << "," << max << ")"
- << " because the epoch for " << nss.ns() << " has changed from "
- << epoch << " to " << (metadata ? metadata->getCollVersion().epoch()
- : ChunkVersion::UNSHARDED().epoch())};
+ << "["
+ << min
+ << ","
+ << max
+ << ")"
+ << " because the epoch for "
+ << nss.ns()
+ << " has changed from "
+ << epoch
+ << " to "
+ << (metadata ? metadata->getCollVersion().epoch()
+ : ChunkVersion::UNSHARDED().epoch())};
}
ChunkType chunk;
diff --git a/src/mongo/db/s/migration_session_id_test.cpp b/src/mongo/db/s/migration_session_id_test.cpp
index 7cb443207de..02625995941 100644
--- a/src/mongo/db/s/migration_session_id_test.cpp
+++ b/src/mongo/db/s/migration_session_id_test.cpp
@@ -71,8 +71,9 @@ TEST(MigrationSessionId, Comparison) {
}
TEST(MigrationSessionId, ErrorWhenTypeIsNotString) {
- ASSERT_NOT_OK(MigrationSessionId::extractFromBSON(
- BSON("SomeField" << 1 << "sessionId" << Date_t::now())).getStatus());
+ ASSERT_NOT_OK(
+ MigrationSessionId::extractFromBSON(BSON("SomeField" << 1 << "sessionId" << Date_t::now()))
+ .getStatus());
ASSERT_NOT_OK(MigrationSessionId::extractFromBSON(BSON("SomeField" << 1 << "sessionId" << 2))
.getStatus());
}
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 109e6cfe8f7..49b07775b28 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -35,9 +35,9 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
@@ -91,8 +91,12 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
if (!refreshStatus.isOK()) {
uasserted(refreshStatus.code(),
str::stream() << "moveChunk cannot start migrate of chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey()
- << ") due to " << refreshStatus.toString());
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ") due to "
+ << refreshStatus.toString());
}
if (shardVersion.majorVersion() == 0) {
@@ -100,20 +104,29 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
// the first place
uasserted(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "moveChunk cannot start migrate of chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << ")"
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ")"
<< " with zero shard version");
}
if (expectedCollectionVersion.epoch() != shardVersion.epoch()) {
- throw SendStaleConfigException(
- _args.getNss().ns(),
- str::stream() << "moveChunk cannot move chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << "), "
- << "collection may have been dropped. "
- << "current epoch: " << shardVersion.epoch()
- << ", cmd epoch: " << expectedCollectionVersion.epoch(),
- expectedCollectionVersion,
- shardVersion);
+ throw SendStaleConfigException(_args.getNss().ns(),
+ str::stream() << "moveChunk cannot move chunk "
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << "), "
+ << "collection may have been dropped. "
+ << "current epoch: "
+ << shardVersion.epoch()
+ << ", cmd epoch: "
+ << expectedCollectionVersion.epoch(),
+ expectedCollectionVersion,
+ shardVersion);
}
// Snapshot the committed metadata from the time the migration starts
@@ -137,13 +150,17 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
origChunk.getMin().woCompare(_args.getMinKey()) ||
origChunk.getMax().woCompare(_args.getMaxKey())) {
// Our boundaries are different from those passed in
- throw SendStaleConfigException(
- _args.getNss().ns(),
- str::stream() << "moveChunk cannot find chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << ")"
- << " to migrate, the chunk boundaries may be stale",
- expectedCollectionVersion,
- shardVersion);
+ throw SendStaleConfigException(_args.getNss().ns(),
+ str::stream()
+ << "moveChunk cannot find chunk "
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ")"
+ << " to migrate, the chunk boundaries may be stale",
+ expectedCollectionVersion,
+ shardVersion);
}
}
@@ -160,12 +177,14 @@ Status MigrationSourceManager::startClone(OperationContext* txn) {
invariant(_state == kCreated);
auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.start",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.start",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _committedMetadata->getKeyPattern());
@@ -228,7 +247,8 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* txn) {
str::stream()
<< "Sharding metadata changed while holding distributed lock. Expected: "
<< _committedMetadata->getCollVersion().toString()
- << ", actual: " << css->getMetadata()->getCollVersion().toString()};
+ << ", actual: "
+ << css->getMetadata()->getCollVersion().toString()};
}
// IMPORTANT: After this line, the critical section is in place and needs to be rolled back
@@ -394,15 +414,20 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
"moveChunk.validating",
_args.getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
if (!status.isOK()) {
- fassertStatusOK(40137,
- {status.code(),
- str::stream()
- << "applyOps failed to commit chunk [" << _args.getMinKey() << ","
- << _args.getMaxKey() << ") due to " << causedBy(applyOpsStatus)
- << ", and updating the optime with a write before refreshing the "
- << "metadata also failed: " << causedBy(status)});
+ fassertStatusOK(
+ 40137,
+ {status.code(),
+ str::stream() << "applyOps failed to commit chunk [" << _args.getMinKey() << ","
+ << _args.getMaxKey()
+ << ") due to "
+ << causedBy(applyOpsStatus)
+ << ", and updating the optime with a write before refreshing the "
+ << "metadata also failed: "
+ << causedBy(status)});
}
ShardingState* const shardingState = ShardingState::get(txn);
@@ -412,7 +437,9 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
fassertStatusOK(34431,
{refreshStatus.code(),
str::stream() << "applyOps failed to commit chunk [" << _args.getMinKey()
- << "," << _args.getMaxKey() << ") due to "
+ << ","
+ << _args.getMaxKey()
+ << ") due to "
<< causedBy(applyOpsStatus)
<< ", and refreshing collection metadata failed: "
<< causedBy(refreshStatus)});
@@ -455,12 +482,14 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
scopedGuard.Dismiss();
_cleanup(txn);
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.commit",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.commit",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
return Status::OK();
}
@@ -470,12 +499,14 @@ void MigrationSourceManager::cleanupOnError(OperationContext* txn) {
return;
}
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.error",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.error",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cleanup(txn);
}
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 42beb940283..decb11713ca 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -90,9 +90,10 @@ private:
*/
DistLockManager::ScopedDistLock _acquireDistLock(OperationContext* txn,
const MoveChunkRequest& args) {
- const std::string whyMessage(str::stream() << "migrating chunk [" << args.getMinKey()
- << ", " << args.getMaxKey() << ") in "
- << args.getNss().ns());
+ const std::string whyMessage(
+ str::stream() << "migrating chunk [" << args.getMinKey() << ", " << args.getMaxKey()
+ << ") in "
+ << args.getNss().ns());
auto distLockStatus =
grid.catalogManager(txn)->distLock(txn, args.getNss().ns(), whyMessage);
if (!distLockStatus.isOK()) {
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 68a9c83c9f3..4ff91d6e658 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,8 +82,8 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogManager(_txn)
- ->logChange(_txn, str::stream() << "moveChunk." << _where, _ns, _b.obj());
+ grid.catalogManager(_txn)->logChange(
+ _txn, str::stream() << "moveChunk." << _where, _ns, _b.obj());
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what();
}
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index fc75b886961..08bbce81b8c 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -36,9 +36,9 @@
#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/client/shard_remote.h"
-#include "mongo/s/client/shard_local.h"
#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/client/shard_local.h"
+#include "mongo/s/client/shard_remote.h"
#include "mongo/s/sharding_egress_metadata_hook_for_mongod.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/stdx/memory.h"
@@ -61,10 +61,10 @@ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS)
shardId, connStr, targeterFactoryPtr->create(connStr));
};
- ShardFactory::BuilderCallable localBuilder =
- [](const ShardId& shardId, const ConnectionString& connStr) {
- return stdx::make_unique<ShardLocal>(shardId);
- };
+ ShardFactory::BuilderCallable localBuilder = [](const ShardId& shardId,
+ const ConnectionString& connStr) {
+ return stdx::make_unique<ShardLocal>(shardId);
+ };
ShardFactory::BuildersMap buildersMap{
{ConnectionString::SET, std::move(setBuilder)},
@@ -76,9 +76,8 @@ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS)
stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
return initializeGlobalShardingState(
- configCS,
- ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
- std::move(shardFactory),
- []() { return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>(); });
+ configCS, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, std::move(shardFactory), []() {
+ return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>();
+ });
}
}
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index fbb6f0f2e62..1a66f122329 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -54,18 +54,18 @@
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include <iostream>
-#include <iomanip>
-#include <ctime>
#include <chrono>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
namespace mongo {
@@ -462,7 +462,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
if (_shardName != shardIdentity.getShardName()) {
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "shard name previously set as " << _shardName
- << " is different from stored: " << shardIdentity.getShardName()};
+ << " is different from stored: "
+ << shardIdentity.getShardName()};
}
auto prevConfigsvrConnStr = grid.shardRegistry()->getConfigServerConnectionString();
@@ -477,7 +478,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "config server connection string previously set as "
<< prevConfigsvrConnStr.toString()
- << " is different from stored: " << configSvrConnStr.toString()};
+ << " is different from stored: "
+ << configSvrConnStr.toString()};
}
// clusterId will only be unset if sharding state was initialized via the sharding
@@ -487,7 +489,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
} else if (_clusterId != shardIdentity.getClusterId()) {
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "cluster id previously set as " << _clusterId
- << " is different from stored: " << shardIdentity.getClusterId()};
+ << " is different from stored: "
+ << shardIdentity.getClusterId()};
}
return Status::OK();
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 1574f611403..d076b995f53 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/update_request.h"
-#include "mongo/db/ops/update.h"
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index 5c710d352da..94b26b1f0b7 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -29,26 +29,26 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
+#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/replica_set_monitor.h"
-#include "mongo/db/service_context_noop.h"
-#include "mongo/executor/network_interface_mock.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/db/service_context_noop.h"
+#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor_pool.h"
+#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager_mock.h"
#include "mongo/s/client/shard_factory.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 7e3473299f3..58018441072 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -220,7 +220,8 @@ public:
//
const string whyMessage(str::stream() << "splitting chunk [" << min << ", " << max
- << ") in " << nss.toString());
+ << ") in "
+ << nss.toString());
auto scopedDistLock = grid.catalogManager(txn)->distLock(
txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
@@ -453,8 +454,8 @@ public:
chunkDetail.append("of", newChunksSize);
appendShortVersion(chunkDetail.subobjStart("chunk"), *newChunks[i]);
- grid.catalogManager(txn)
- ->logChange(txn, "multi-split", nss.ns(), chunkDetail.obj());
+ grid.catalogManager(txn)->logChange(
+ txn, "multi-split", nss.ns(), chunkDetail.obj());
}
}
diff --git a/src/mongo/db/s/start_chunk_clone_request_test.cpp b/src/mongo/db/s/start_chunk_clone_request_test.cpp
index 5071ff61edc..2b977178781 100644
--- a/src/mongo/db/s/start_chunk_clone_request_test.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request_test.cpp
@@ -65,9 +65,10 @@ TEST(StartChunkCloneRequest, CreateAsCommandComplete) {
ASSERT_EQ(sessionId.toString(), request.getSessionId().toString());
ASSERT(sessionId.matches(request.getSessionId()));
ASSERT_EQ("TestConfigRS/CS1:12345,CS2:12345,CS3:12345", request.getConfigServerCS().toString());
- ASSERT_EQ(assertGet(ConnectionString::parse(
- "TestDonorRS/Donor1:12345,Donor2:12345,Donor3:12345")).toString(),
- request.getFromShardConnectionString().toString());
+ ASSERT_EQ(
+ assertGet(ConnectionString::parse("TestDonorRS/Donor1:12345,Donor2:12345,Donor3:12345"))
+ .toString(),
+ request.getFromShardConnectionString().toString());
ASSERT_EQ("shard0002", request.getToShardId());
ASSERT_EQ(BSON("Key" << -100), request.getMinKey());
ASSERT_EQ(BSON("Key" << 100), request.getMaxKey());
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 8a2382e4bf7..960faff68ba 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -47,7 +47,8 @@ TEST(ShardIdentityType, RoundTrip) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_OK(result.getStatus());
@@ -68,7 +69,8 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,7 +81,8 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -90,7 +93,8 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -116,7 +120,8 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse, ShardIdentityType::fromBSON(doc).getStatus());
}
@@ -129,7 +134,8 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat, ShardIdentityType::fromBSON(doc).getStatus());
}
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 9ab8e97c86c..c1729324606 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -36,10 +36,10 @@
#define SYSLOG_NAMES
#include <syslog.h>
#endif
-#include <ios>
-#include <iostream>
#include <boost/filesystem.hpp>
#include <boost/filesystem/operations.hpp>
+#include <ios>
+#include <iostream>
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
@@ -82,26 +82,13 @@ typedef struct _code {
int c_val;
} CODE;
-CODE facilitynames[] = {{"auth", LOG_AUTH},
- {"cron", LOG_CRON},
- {"daemon", LOG_DAEMON},
- {"kern", LOG_KERN},
- {"lpr", LOG_LPR},
- {"mail", LOG_MAIL},
- {"news", LOG_NEWS},
- {"security", LOG_AUTH}, /* DEPRECATED */
- {"syslog", LOG_SYSLOG},
- {"user", LOG_USER},
- {"uucp", LOG_UUCP},
- {"local0", LOG_LOCAL0},
- {"local1", LOG_LOCAL1},
- {"local2", LOG_LOCAL2},
- {"local3", LOG_LOCAL3},
- {"local4", LOG_LOCAL4},
- {"local5", LOG_LOCAL5},
- {"local6", LOG_LOCAL6},
- {"local7", LOG_LOCAL7},
- {NULL, -1}};
+CODE facilitynames[] = {{"auth", LOG_AUTH}, {"cron", LOG_CRON}, {"daemon", LOG_DAEMON},
+ {"kern", LOG_KERN}, {"lpr", LOG_LPR}, {"mail", LOG_MAIL},
+ {"news", LOG_NEWS}, {"security", LOG_AUTH}, /* DEPRECATED */
+ {"syslog", LOG_SYSLOG}, {"user", LOG_USER}, {"uucp", LOG_UUCP},
+ {"local0", LOG_LOCAL0}, {"local1", LOG_LOCAL1}, {"local2", LOG_LOCAL2},
+ {"local3", LOG_LOCAL3}, {"local4", LOG_LOCAL4}, {"local5", LOG_LOCAL5},
+ {"local6", LOG_LOCAL6}, {"local7", LOG_LOCAL7}, {NULL, -1}};
#endif // !defined(INTERNAL_NOPRI)
#endif // defined(SYSLOG_NAMES)
@@ -127,10 +114,9 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
options->addOptionChaining("version", "version", moe::Switch, "show version information")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining("config",
- "config,f",
- moe::String,
- "configuration file specifying additional options")
+ options
+ ->addOptionChaining(
+ "config", "config,f", moe::String, "configuration file specifying additional options")
.setSources(moe::SourceAllLegacy);
// The verbosity level can be set at startup in the following ways. Note that if multiple
@@ -166,11 +152,12 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
// component: |
// Sharding: |
// verbosity: 5 | 5 (for Sharding only, 0 for default)
- options->addOptionChaining(
- "verbose",
- "verbose,v",
- moe::String,
- "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ options
+ ->addOptionChaining(
+ "verbose",
+ "verbose,v",
+ moe::String,
+ "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
.setImplicit(moe::Value(std::string("v")))
.setSources(moe::SourceAllLegacy);
@@ -183,11 +170,11 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
if (component == logger::LogComponent::kDefault) {
continue;
}
- options->addOptionChaining("systemLog.component." + component.getDottedName() +
- ".verbosity",
- "",
- moe::Int,
- "set component verbose level for " + component.getDottedName())
+ options
+ ->addOptionChaining("systemLog.component." + component.getDottedName() + ".verbosity",
+ "",
+ moe::Int,
+ "set component verbose level for " + component.getDottedName())
.setSources(moe::SourceYAMLConfig);
}
@@ -207,36 +194,39 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
options->addOptionChaining(
"net.maxIncomingConnections", "maxConns", moe::Int, maxConnInfoBuilder.str().c_str());
- options->addOptionChaining(
- "logpath",
- "logpath",
- moe::String,
- "log file to send write to instead of stdout - has to be a file, not directory")
+ options
+ ->addOptionChaining(
+ "logpath",
+ "logpath",
+ moe::String,
+ "log file to send write to instead of stdout - has to be a file, not directory")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("syslog");
options
->addOptionChaining(
- "systemLog.path",
- "",
- moe::String,
- "log file to send writes to if logging to a file - has to be a file, not directory")
+ "systemLog.path",
+ "",
+ moe::String,
+ "log file to send writes to if logging to a file - has to be a file, not directory")
.setSources(moe::SourceYAMLConfig)
.hidden();
- options->addOptionChaining("systemLog.destination",
- "",
- moe::String,
- "Destination of system log output. (syslog/file)")
+ options
+ ->addOptionChaining("systemLog.destination",
+ "",
+ moe::String,
+ "Destination of system log output. (syslog/file)")
.setSources(moe::SourceYAMLConfig)
.hidden()
.format("(:?syslog)|(:?file)", "(syslog/file)");
#ifndef _WIN32
- options->addOptionChaining("syslog",
- "syslog",
- moe::Switch,
- "log to system's syslog facility instead of file or stdout")
+ options
+ ->addOptionChaining("syslog",
+ "syslog",
+ moe::Switch,
+ "log to system's syslog facility instead of file or stdout")
.incompatibleWith("logpath")
.setSources(moe::SourceAllLegacy);
@@ -267,10 +257,10 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
moe::String,
"full path to pidfile (if not set, no pidfile is created)");
- options->addOptionChaining("security.keyFile",
- "keyFile",
- moe::String,
- "private key for cluster authentication").incompatibleWith("noauth");
+ options
+ ->addOptionChaining(
+ "security.keyFile", "keyFile", moe::String, "private key for cluster authentication")
+ .incompatibleWith("noauth");
options->addOptionChaining("noauth", "noauth", moe::Switch, "run without security")
.setSources(moe::SourceAllLegacy)
@@ -279,46 +269,52 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
.incompatibleWith("transitionToAuth")
.incompatibleWith("clusterAuthMode");
- options->addOptionChaining(
- "setParameter", "setParameter", moe::StringMap, "Set a configurable parameter")
+ options
+ ->addOptionChaining(
+ "setParameter", "setParameter", moe::StringMap, "Set a configurable parameter")
.composing();
- options->addOptionChaining(
- "httpinterface", "httpinterface", moe::Switch, "enable http interface")
+ options
+ ->addOptionChaining("httpinterface", "httpinterface", moe::Switch, "enable http interface")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("nohttpinterface");
options->addOptionChaining("net.http.enabled", "", moe::Bool, "enable http interface")
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining(
- "net.http.port", "", moe::Switch, "port to listen on for http interface")
+ options
+ ->addOptionChaining(
+ "net.http.port", "", moe::Switch, "port to listen on for http interface")
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining(
- "security.transitionToAuth",
- "transitionToAuth",
- moe::Switch,
- "For rolling access control upgrade. Attempt to authenticate over outgoing "
- "connections and proceed regardless of success. Accept incoming connections "
- "with or without authentication.").incompatibleWith("noauth");
+ options
+ ->addOptionChaining(
+ "security.transitionToAuth",
+ "transitionToAuth",
+ moe::Switch,
+ "For rolling access control upgrade. Attempt to authenticate over outgoing "
+ "connections and proceed regardless of success. Accept incoming connections "
+ "with or without authentication.")
+ .incompatibleWith("noauth");
- options->addOptionChaining(
- "security.clusterAuthMode",
- "clusterAuthMode",
- moe::String,
- "Authentication mode used for cluster authentication. Alternatives are "
- "(keyFile|sendKeyFile|sendX509|x509)")
+ options
+ ->addOptionChaining("security.clusterAuthMode",
+ "clusterAuthMode",
+ moe::String,
+ "Authentication mode used for cluster authentication. Alternatives are "
+ "(keyFile|sendKeyFile|sendX509|x509)")
.format("(:?keyFile)|(:?sendKeyFile)|(:?sendX509)|(:?x509)",
"(keyFile/sendKeyFile/sendX509/x509)");
#ifndef _WIN32
- options->addOptionChaining(
- "nounixsocket", "nounixsocket", moe::Switch, "disable listening on unix sockets")
+ options
+ ->addOptionChaining(
+ "nounixsocket", "nounixsocket", moe::Switch, "disable listening on unix sockets")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "net.unixDomainSocket.enabled", "", moe::Bool, "disable listening on unix sockets")
+ options
+ ->addOptionChaining(
+ "net.unixDomainSocket.enabled", "", moe::Bool, "disable listening on unix sockets")
.setSources(moe::SourceYAMLConfig);
options->addOptionChaining("net.unixDomainSocket.pathPrefix",
@@ -344,45 +340,52 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
}
// Extra hidden options
- options->addOptionChaining(
- "nohttpinterface", "nohttpinterface", moe::Switch, "disable http interface")
+ options
+ ->addOptionChaining(
+ "nohttpinterface", "nohttpinterface", moe::Switch, "disable http interface")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("httpinterface");
- options->addOptionChaining("objcheck",
- "objcheck",
- moe::Switch,
- "inspect client data for validity on receipt (DEFAULT)")
+ options
+ ->addOptionChaining("objcheck",
+ "objcheck",
+ moe::Switch,
+ "inspect client data for validity on receipt (DEFAULT)")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("noobjcheck");
- options->addOptionChaining("noobjcheck",
- "noobjcheck",
- moe::Switch,
- "do NOT inspect client data for validity on receipt")
+ options
+ ->addOptionChaining("noobjcheck",
+ "noobjcheck",
+ moe::Switch,
+ "do NOT inspect client data for validity on receipt")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("objcheck");
- options->addOptionChaining("net.wireObjectCheck",
- "",
- moe::Bool,
- "inspect client data for validity on receipt (DEFAULT)")
+ options
+ ->addOptionChaining("net.wireObjectCheck",
+ "",
+ moe::Bool,
+ "inspect client data for validity on receipt (DEFAULT)")
.hidden()
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining("systemLog.traceAllExceptions",
- "traceExceptions",
- moe::Switch,
- "log stack traces for every exception").hidden();
+ options
+ ->addOptionChaining("systemLog.traceAllExceptions",
+ "traceExceptions",
+ moe::Switch,
+ "log stack traces for every exception")
+ .hidden();
- options->addOptionChaining("enableExperimentalStorageDetailsCmd",
- "enableExperimentalStorageDetailsCmd",
- moe::Switch,
- "EXPERIMENTAL (UNSUPPORTED). "
- "Enable command computing aggregate statistics on storage.")
+ options
+ ->addOptionChaining("enableExperimentalStorageDetailsCmd",
+ "enableExperimentalStorageDetailsCmd",
+ moe::Switch,
+ "EXPERIMENTAL (UNSUPPORTED). "
+ "Enable command computing aggregate statistics on storage.")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -396,11 +399,12 @@ Status addWindowsServerOptions(moe::OptionSection* options) {
options->addOptionChaining("remove", "remove", moe::Switch, "remove Windows service")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "reinstall",
- "reinstall",
- moe::Switch,
- "reinstall Windows service (equivalent to --remove followed by --install)")
+ options
+ ->addOptionChaining(
+ "reinstall",
+ "reinstall",
+ moe::Switch,
+ "reinstall Windows service (equivalent to --remove followed by --install)")
.setSources(moe::SourceAllLegacy);
options->addOptionChaining("processManagement.windowsService.serviceName",
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index 9415d3470ab..9ce8d8b76be 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -30,8 +30,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/service_context_d.cpp b/src/mongo/db/service_context_d.cpp
index 03d2f8ff7f1..1f16d4c0090 100644
--- a/src/mongo/db/service_context_d.cpp
+++ b/src/mongo/db/service_context_d.cpp
@@ -85,7 +85,9 @@ void ServiceContextMongoD::createLockFile() {
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath << ": " << ex.what(),
+ << storageGlobalParams.dbpath
+ << ": "
+ << ex.what(),
false);
}
bool wasUnclean = _lockFile->createdByUncleanShutdown();
@@ -127,12 +129,14 @@ void ServiceContextMongoD::initializeGlobalStorageEngine() {
if (factory) {
uassert(28662,
- str::stream()
- << "Cannot start server. Detected data files in " << dbpath
- << " created by"
- << " the '" << *existingStorageEngine << "' storage engine, but the"
- << " specified storage engine was '" << factory->getCanonicalName()
- << "'.",
+ str::stream() << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '"
+ << *existingStorageEngine
+ << "' storage engine, but the"
+ << " specified storage engine was '"
+ << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
}
} else {
@@ -164,7 +168,8 @@ void ServiceContextMongoD::initializeGlobalStorageEngine() {
uassert(34368,
str::stream()
<< "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine << ", does not support read-only operation",
+ << storageGlobalParams.engine
+ << ", does not support read-only operation",
factory->supportsReadOnly());
}
diff --git a/src/mongo/db/service_context_noop.cpp b/src/mongo/db/service_context_noop.cpp
index 2703edea8ff..4e9b67fc28f 100644
--- a/src/mongo/db/service_context_noop.cpp
+++ b/src/mongo/db/service_context_noop.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/service_context_noop.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index fda8ee146ed..7f6e8866562 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -146,8 +146,7 @@ public:
/// Any number of values
template <typename Container>
- InMemIterator(const Container& input)
- : _data(input.begin(), input.end()) {}
+ InMemIterator(const Container& input) : _data(input.begin(), input.end()) {}
bool more() {
return !_data.empty();
@@ -167,7 +166,8 @@ template <typename Key, typename Value>
class FileIterator : public SortIteratorInterface<Key, Value> {
public:
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
typedef std::pair<Key, Value> Data;
FileIterator(const std::string& fileName,
@@ -179,8 +179,8 @@ public:
_fileDeleter(fileDeleter),
_file(_fileName.c_str(), std::ios::in | std::ios::binary) {
massert(16814,
- str::stream() << "error opening file \"" << _fileName
- << "\": " << myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName << "\": "
+ << myErrnoWithDescription(),
_file.good());
massert(16815,
@@ -274,8 +274,8 @@ private:
}
msgasserted(16817,
- str::stream() << "error reading file \"" << _fileName
- << "\": " << myErrnoWithDescription());
+ str::stream() << "error reading file \"" << _fileName << "\": "
+ << myErrnoWithDescription());
}
verify(_file.gcount() == static_cast<std::streamsize>(size));
}
@@ -419,7 +419,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
NoLimitSorter(const SortOptions& opts,
const Comparator& comp,
@@ -489,7 +490,8 @@ private:
// need to be revisited.
uasserted(16819,
str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of "
+ << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -569,7 +571,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
TopKSorter(const SortOptions& opts,
const Comparator& comp,
@@ -765,7 +768,8 @@ private:
// need to be revisited.
uasserted(16820,
str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of "
+ << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -840,8 +844,8 @@ SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts, const Se
_file.open(_fileName.c_str(), std::ios::binary | std::ios::out);
massert(16818,
- str::stream() << "error opening file \"" << _fileName
- << "\": " << sorter::myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName << "\": "
+ << sorter::myErrnoWithDescription(),
_file.good());
_fileDeleter = std::make_shared<sorter::FileDeleter>(_fileName);
@@ -905,8 +909,8 @@ void SortedFileWriter<Key, Value>::spill() {
} catch (const std::exception&) {
msgasserted(16821,
- str::stream() << "error writing to file \"" << _fileName
- << "\": " << sorter::myErrnoWithDescription());
+ str::stream() << "error writing to file \"" << _fileName << "\": "
+ << sorter::myErrnoWithDescription());
}
_buffer.reset();
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index ba6f3e3192f..54f19dd0197 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -159,7 +159,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
template <typename Comparator>
static Sorter* make(const SortOptions& opts,
@@ -187,7 +188,8 @@ class SortedFileWriter {
public:
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
explicit SortedFileWriter(const SortOptions& opts, const Settings& settings = Settings());
diff --git a/src/mongo/db/sorter/sorter_test.cpp b/src/mongo/db/sorter/sorter_test.cpp
index c0a90a5fc1d..37bf118fe98 100644
--- a/src/mongo/db/sorter/sorter_test.cpp
+++ b/src/mongo/db/sorter/sorter_test.cpp
@@ -32,9 +32,9 @@
#include <boost/filesystem.hpp>
-#include "mongo/config.h"
#include "mongo/base/data_type_endian.h"
#include "mongo/base/init.h"
+#include "mongo/config.h"
#include "mongo/db/service_context.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/stdx/memory.h"
@@ -192,7 +192,7 @@ void _assertIteratorsEquivalent(It1 it1, It2 it2, int line) {
#define ASSERT_ITERATORS_EQUIVALENT(it1, it2) _assertIteratorsEquivalent(it1, it2, __LINE__)
template <int N>
-std::shared_ptr<IWIterator> makeInMemIterator(const int(&array)[N]) {
+std::shared_ptr<IWIterator> makeInMemIterator(const int (&array)[N]) {
std::vector<IWPair> vec;
for (int i = 0; i < N; i++)
vec.push_back(IWPair(array[i], -array[i]));
@@ -200,7 +200,7 @@ std::shared_ptr<IWIterator> makeInMemIterator(const int(&array)[N]) {
}
template <typename IteratorPtr, int N>
-std::shared_ptr<IWIterator> mergeIterators(IteratorPtr(&array)[N],
+std::shared_ptr<IWIterator> mergeIterators(IteratorPtr (&array)[N],
Direction Dir = ASC,
const SortOptions& opts = SortOptions()) {
std::vector<std::shared_ptr<IWIterator>> vec;
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index f5e3706357d..d1e69ead044 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -79,7 +79,8 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
log() << "** WARNING: Access control is not enabled for the database."
<< startupWarningsLog;
log() << "** Read and write access to data and configuration is "
- "unrestricted." << startupWarningsLog;
+ "unrestricted."
+ << startupWarningsLog;
warned = true;
}
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index f40e3914b4e..30c8b261fdc 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/server_options.h"
#include "mongo/db/startup_warnings_common.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/version.h"
@@ -109,9 +109,9 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
if (opMode.empty()) {
- return StatusWith<std::string>(ErrorCodes::BadValue,
- str::stream() << "invalid mode in " << filename << ": '"
- << line << "'");
+ return StatusWith<std::string>(
+ ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '" << line << "'");
}
// Check against acceptable values of opMode.
@@ -120,12 +120,16 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
ErrorCodes::BadValue,
str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
- << filename << ": '" << opMode << "''");
+ << filename
+ << ": '"
+ << opMode
+ << "''");
}
} catch (const boost::filesystem::filesystem_error& err) {
return StatusWith<std::string>(ErrorCodes::UnknownError,
str::stream() << "Failed to probe \"" << err.path1().string()
- << "\": " << err.code().message());
+ << "\": "
+ << err.code().message());
}
return StatusWith<std::string>(opMode);
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index cae0cb0ad16..fe86aa16d67 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -29,12 +29,12 @@
#pragma once
-#include "mongo/platform/basic.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/basic.h"
+#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/util/net/message.h"
#include "mongo/util/processinfo.h"
-#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
diff --git a/src/mongo/db/stats/timer_stats_test.cpp b/src/mongo/db/stats/timer_stats_test.cpp
index cd755386329..c1284c55357 100644
--- a/src/mongo/db/stats/timer_stats_test.cpp
+++ b/src/mongo/db/stats/timer_stats_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/stats/timer_stats.h"
-#include "mongo/util/time_support.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/time_support.h"
namespace {
diff --git a/src/mongo/db/storage/devnull/devnull_init.cpp b/src/mongo/db/storage/devnull/devnull_init.cpp
index afdfecc1457..b1c73dbbbcd 100644
--- a/src/mongo/db/storage/devnull/devnull_init.cpp
+++ b/src/mongo/db/storage/devnull/devnull_init.cpp
@@ -30,8 +30,8 @@
*/
#include "mongo/base/init.h"
-#include "mongo/db/service_context_d.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
#include "mongo/db/storage/storage_options.h"
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index a308b6d7984..a036b05c44e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -35,8 +35,8 @@
#include <set>
#include "mongo/db/catalog/index_catalog_entry.h"
-#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 300929ae41d..9b36c222c53 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -718,9 +718,9 @@ void KeyString::_appendNumberDecimal(const Decimal128 dec, bool invert) {
// in the normal range of double, so the decimal can be represented with at least 15 digits
// of precision by the double 'bin'
} else if (dec.getCoefficientHigh() == 0 && dec.getCoefficientLow() < k1E15) {
- dassert(Decimal128(std::abs(bin),
- Decimal128::kRoundTo15Digits,
- Decimal128::kRoundTowardPositive).isEqual(dec.toAbs()));
+ dassert(Decimal128(
+ std::abs(bin), Decimal128::kRoundTo15Digits, Decimal128::kRoundTowardPositive)
+ .isEqual(dec.toAbs()));
_appendDoubleWithoutTypeBits(bin, kDCMEqualToDoubleRoundedUpTo15Digits, invert);
return;
} else {
@@ -752,10 +752,10 @@ void KeyString::_appendNumberDecimal(const Decimal128 dec, bool invert) {
// Now we know that we can recover the original decimal value (but not its precision, which is
// given by the type bits) from the binary double plus the decimal continuation.
uint64_t decimalContinuation = decDiff.getCoefficientLow();
- dassert(storedValue.add(Decimal128(isNegative,
- storedValue.getBiasedExponent(),
- 0,
- decimalContinuation)).isEqual(dec));
+ dassert(
+ storedValue
+ .add(Decimal128(isNegative, storedValue.getBiasedExponent(), 0, decimalContinuation))
+ .isEqual(dec));
decimalContinuation = endian::nativeToBig(decimalContinuation);
_append(decimalContinuation, isNegative ? !invert : invert);
}
@@ -976,9 +976,10 @@ void KeyString::_appendTinyDecimalWithoutTypeBits(const Decimal128 dec,
_append(endian::nativeToBig(encoded), isNegative ? !invert : invert);
Decimal128 storedVal(scaledBin, Decimal128::kRoundTo34Digits, Decimal128::kRoundTowardPositive);
- storedVal = storedVal.multiply(kTinyDoubleExponentDownshiftFactorAsDecimal,
- Decimal128::kRoundTowardZero)
- .add(Decimal128::kLargestNegativeExponentZero);
+ storedVal =
+ storedVal
+ .multiply(kTinyDoubleExponentDownshiftFactorAsDecimal, Decimal128::kRoundTowardZero)
+ .add(Decimal128::kLargestNegativeExponentZero);
dassert(storedVal.isLess(magnitude));
Decimal128 decDiff = magnitude.subtract(storedVal);
dassert(decDiff.getBiasedExponent() == storedVal.getBiasedExponent() || decDiff.isZero());
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
index a37eb986256..4bbc373c9c2 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
@@ -33,9 +33,9 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/multikey_paths.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
@@ -105,7 +105,8 @@ public:
bool match = (expected == actual);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expected) << ", "
- << "Actual: " << dumpMultikeyPaths(actual));
+ << "Actual: "
+ << dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 6968d06d063..98f00ff1d07 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/storage/kv/kv_engine_test_harness.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/kv/kv_catalog.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/record_store.h"
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index ff42dd15c16..5a3914ea072 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/storage/mmap_v1/btree/btree_logic.h"
#include "mongo/db/storage/mmap_v1/btree/key.h"
#include "mongo/db/storage/mmap_v1/diskloc.h"
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_base.h"
+#include "mongo/db/storage/record_store.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -396,7 +396,8 @@ bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
const FullKey klast = getFullKey(bucket, bucket->n - 1);
if (klast.data.woCompare(key, _ordering) > 0) {
log() << "btree bucket corrupt? "
- "consider reindexing or running validate command" << endl;
+ "consider reindexing or running validate command"
+ << endl;
log() << " klast: " << klast.data.toString() << endl;
log() << " key: " << key.toString() << endl;
invariant(false);
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
index ff2e889a202..ecd0f8c4b4e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
@@ -28,9 +28,9 @@
#pragma once
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/functional.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 915e3a7e44d..dc270d813b2 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -42,10 +42,10 @@
#include "mongo/db/db.h"
#include "mongo/db/index_legacy.h"
#include "mongo/db/json.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
-#include "mongo/db/operation_context.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/startup_test.h"
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index cc5e57e0868..acc2460f1ec 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -401,7 +401,8 @@ void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* t
_updateSystemNamespaces(
txn,
BSON("$set" << BSON("options.validator" << validator << "options.validationLevel"
- << validationLevel << "options.validationAction"
+ << validationLevel
+ << "options.validationAction"
<< validationAction)));
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index 64fb1e64066..b5fc8bf4d6c 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -38,11 +38,11 @@
#include <utility>
#include <vector>
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
-#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/file_allocator.h"
+#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -211,7 +211,9 @@ void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, con
massert(13640,
str::stream() << "DataFileHeader looks corrupt at file open filelength:"
- << filelength << " fileno:" << fileno,
+ << filelength
+ << " fileno:"
+ << fileno,
filelength > 32768);
// The writes done in this function must not be rolled back. If the containing
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index 4fafae825ea..61e52b5dedf 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index db74bd99ea8..9bd44ba3ee3 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -488,10 +488,15 @@ void Stats::S::_asObj(BSONObjBuilder* builder) const {
<< _journaledBytes / (_uncompressedBytes + 1.0) << "commitsInWriteLock" << _commitsInWriteLock
<< "earlyCommits" << 0 << "timeMs"
<< BSON("dt" << _durationMillis << "prepLogBuffer" << (unsigned)(_prepLogBufferMicros / 1000)
- << "writeToJournal" << (unsigned)(_writeToJournalMicros / 1000)
- << "writeToDataFiles" << (unsigned)(_writeToDataFilesMicros / 1000)
- << "remapPrivateView" << (unsigned)(_remapPrivateViewMicros / 1000) << "commits"
- << (unsigned)(_commitsMicros / 1000) << "commitsInWriteLock"
+ << "writeToJournal"
+ << (unsigned)(_writeToJournalMicros / 1000)
+ << "writeToDataFiles"
+ << (unsigned)(_writeToDataFilesMicros / 1000)
+ << "remapPrivateView"
+ << (unsigned)(_remapPrivateViewMicros / 1000)
+ << "commits"
+ << (unsigned)(_commitsMicros / 1000)
+ << "commitsInWriteLock"
<< (unsigned)(_commitsInWriteLockMicros / 1000));
if (storageGlobalParams.journalCommitIntervalMs != 0) {
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
index aff01c1c7bf..6a8ca62f15d 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
@@ -37,8 +37,8 @@
#include <iostream>
#include "mongo/db/storage/mmap_v1/dur.h"
-#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
+#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index 524085a87e9..91a6d6fd569 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -40,13 +40,13 @@
#include "mongo/base/init.h"
#include "mongo/config.h"
#include "mongo/db/client.h"
-#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/compress.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/mmap_v1/dur_journalimpl.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
#include "mongo/db/storage/mmap_v1/logfile.h"
+#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/paths.h"
#include "mongo/db/storage/storage_options.h"
@@ -569,10 +569,10 @@ void LSNFile::set(unsigned long long x) {
if something highly surprising, throws to abort
*/
unsigned long long LSNFile::get() {
- uassert(
- 13614,
- str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver,
- ver == 0);
+ uassert(13614,
+ str::stream() << "unexpected version number of lsn file in journal/ directory got: "
+ << ver,
+ ver == 0);
if (~lsn != checkbytes) {
log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn
<< " checkbytes: " << hex << checkbytes << endl;
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index dfd429d0713..15e7e994b38 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
-#include "mongo/db/storage/mmap_v1/durop.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
+#include "mongo/db/storage/mmap_v1/durop.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/platform/strnlen.h"
#include "mongo/util/bufreader.h"
@@ -92,7 +92,7 @@ void removeJournalFiles();
boost::filesystem::path getJournalDir();
-struct ParsedJournalEntry {/*copyable*/
+struct ParsedJournalEntry { /*copyable*/
ParsedJournalEntry() : e(0) {}
// relative path of database for the operation.
@@ -121,7 +121,8 @@ static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path
if (m.count(u)) {
uasserted(13531,
str::stream() << "unexpected files in journal directory " << dir.string()
- << " : " << fileName);
+ << " : "
+ << fileName);
}
m.insert(pair<unsigned, boost::filesystem::path>(u, filepath));
}
@@ -130,7 +131,8 @@ static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path
if (i != m.begin() && m.count(i->first - 1) == 0) {
uasserted(13532,
str::stream() << "unexpected file in journal directory " << dir.string()
- << " : " << boost::filesystem::path(i->second).leaf().string()
+ << " : "
+ << boost::filesystem::path(i->second).leaf().string()
<< " : can't find its preceding file");
}
files.push_back(i->second);
@@ -489,7 +491,8 @@ bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
log() << "journal file version number mismatch got:" << hex << h._version
<< " expected:" << hex << (unsigned)JHeader::CurrentVersion
<< ". if you have just upgraded, recover with old version of mongod, "
- "terminate cleanly, then upgrade." << endl;
+ "terminate cleanly, then upgrade."
+ << endl;
// Not using JournalSectionCurruptException as we don't want to ignore
// journal files on upgrade.
uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 6eb2f82dcee..ff8add6cb91 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -46,8 +46,8 @@
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
using namespace mongoutils;
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index 0ea1949ad12..627d53df05d 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -39,9 +39,9 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
#include "mongo/util/file.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index 50ddc33318a..98aaf8406c3 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -49,7 +49,7 @@ namespace dur {
*
* For each op we want to journal, we define a subclass.
*/
-class DurOp {/* copyable */
+class DurOp { /* copyable */
public:
// @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
// @see dur::JEntry
diff --git a/src/mongo/db/storage/mmap_v1/extent.cpp b/src/mongo/db/storage/mmap_v1/extent.cpp
index 7b92551fa8a..fb134504f10 100644
--- a/src/mongo/db/storage/mmap_v1/extent.cpp
+++ b/src/mongo/db/storage/mmap_v1/extent.cpp
@@ -44,8 +44,14 @@ static_assert(sizeof(Extent) - 4 == 48 + 128, "sizeof(Extent) - 4 == 48 + 128");
BSONObj Extent::dump() const {
return BSON("loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev"
- << xprev.toString() << "nsdiag" << nsDiagnostic.toString() << "size" << length
- << "firstRecord" << firstRecord.toString() << "lastRecord"
+ << xprev.toString()
+ << "nsdiag"
+ << nsDiagnostic.toString()
+ << "size"
+ << length
+ << "firstRecord"
+ << firstRecord.toString()
+ << "lastRecord"
<< lastRecord.toString());
}
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 0be9d157481..7a630ea3118 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -38,8 +38,8 @@
#include <fcntl.h>
#if defined(__FreeBSD__)
-#include <sys/param.h>
#include <sys/mount.h>
+#include <sys/param.h>
#endif
#if defined(__linux__)
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index a67ecd85f3d..94beefc55ae 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -44,10 +44,10 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/logfile.h"
#include "mongo/db/storage/paths.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/background.h"
#include "mongo/util/timer.h"
diff --git a/src/mongo/db/storage/mmap_v1/logfile.cpp b/src/mongo/db/storage/mmap_v1/logfile.cpp
index 6124ce51248..ddc66b8ce2f 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.cpp
+++ b/src/mongo/db/storage/mmap_v1/logfile.cpp
@@ -123,7 +123,9 @@ void LogFile::synchronousAppend(const void* _buf, size_t _len) {
else
uasserted(13517,
str::stream() << "error appending to file " << _name << ' ' << _len << ' '
- << toWrite << ' ' << errnoWithDescription(e));
+ << toWrite
+ << ' '
+ << errnoWithDescription(e));
} else {
dassert(written == toWrite);
}
@@ -137,10 +139,10 @@ void LogFile::synchronousAppend(const void* _buf, size_t _len) {
/// posix
-#include <sys/types.h>
-#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#ifdef __linux__
#include <linux/fs.h>
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index 90cda10c57e..82d62aba8e9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -103,7 +103,9 @@ void* MemoryMappedFile::map(const char* filename) {
} catch (boost::filesystem::filesystem_error& e) {
uasserted(15922,
mongoutils::str::stream() << "couldn't get file length when opening mapping "
- << filename << ' ' << e.what());
+ << filename
+ << ' '
+ << e.what());
}
return map(filename, l);
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index a5fdc361694..382860f3556 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -37,10 +37,10 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "mongo/platform/atomic_word.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
@@ -162,7 +162,8 @@ void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
unsigned long long filelen = lseek(fd, 0, SEEK_END);
uassert(10447,
str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen
- << ' ' << sizeof(size_t),
+ << ' '
+ << sizeof(size_t),
filelen == length);
lseek(fd, 0, SEEK_SET);
@@ -174,7 +175,8 @@ void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
if (errno == ENOMEM) {
if (sizeof(void*) == 4)
error() << "mmap failed with out of memory. You are using a 32-bit build and "
- "probably need to upgrade to 64" << endl;
+ "probably need to upgrade to 64"
+ << endl;
else
error() << "mmap failed with out of memory. (64 bit build)" << endl;
}
@@ -202,7 +204,8 @@ void* MemoryMappedFile::createPrivateMap() {
if (errno == ENOMEM) {
if (sizeof(void*) == 4) {
error() << "mmap private failed with out of memory. You are using a 32-bit build "
- "and probably need to upgrade to 64" << endl;
+ "and probably need to upgrade to 64"
+ << endl;
} else {
error() << "mmap private failed with out of memory. (64 bit build)" << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 83fda94c14d..37f96019430 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -37,10 +37,10 @@
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/index/2d_access_method.h"
#include "mongo/db/index/btree_access_method.h"
-#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/fts_access_method.h"
#include "mongo/db/index/hash_access_method.h"
#include "mongo/db/index/haystack_access_method.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/s2_access_method.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index 3ecb76b9ecb..d02f7da17b0 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h"
+#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index b83e72dc580..e4f4b0340de 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -32,22 +32,22 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
-#include <boost/filesystem/path.hpp>
#include <boost/filesystem/operations.hpp>
+#include <boost/filesystem/path.hpp>
#include <fstream>
#include "mongo/db/mongod_options.h"
-#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/data_file_sync.h"
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_recover.h"
#include "mongo/db/storage/mmap_v1/dur_recovery_unit.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
+#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/storage_engine_lock_file.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
@@ -132,9 +132,11 @@ void checkForUncleanShutdown(MMAPV1Engine* storageEngine,
if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
log() << "**************" << endl;
log() << "Error: journal files are present in journal directory, yet starting without "
- "journaling enabled." << endl;
+ "journaling enabled."
+ << endl;
log() << "It is recommended that you start with journaling enabled so that recovery may "
- "occur." << endl;
+ "occur."
+ << endl;
log() << "**************" << endl;
uasserted(13597, "can't start without --journal enabled when journal/ files are present");
}
@@ -149,11 +151,14 @@ void checkForUncleanShutdown(MMAPV1Engine* storageEngine,
if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
log() << "**************" << endl;
log() << "Error: journal files are present in journal directory, yet starting without "
- "--journal enabled." << endl;
+ "--journal enabled."
+ << endl;
log() << "It is recommended that you start with journaling enabled so that recovery may "
- "occur." << endl;
+ "occur."
+ << endl;
log() << "Alternatively (not recommended), you can backup everything, then delete the "
- "journal files, and run --repair" << endl;
+ "journal files, and run --repair"
+ << endl;
log() << "**************" << endl;
uasserted(13618, "can't start without --journal enabled when journal/ files are present");
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index 347b6e02d17..80a20ecbb0c 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -32,8 +32,8 @@
#include <map>
-#include "mongo/db/storage/mmap_v1/record_access_tracker.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/record_access_tracker.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/stdx/mutex.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 2076ca868b1..3fcf1205646 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -37,17 +37,17 @@
#include "mongo/base/counter.h"
#include "mongo/db/audit.h"
#include "mongo/db/client.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/data_file.h"
-#include "mongo/db/storage/mmap_v1/record.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
+#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/record_fetcher.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/file.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
index ab1bd378fea..19ec450e1ac 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
@@ -29,8 +29,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/json.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/unittest/unittest.h"
@@ -85,9 +85,13 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
- << " instead. metadataOptions: " << metadataOptions
- << "; directoryPerDB: " << directoryPerDB);
+ << ErrorCodes::errorString(expectedCode)
+ << " but got "
+ << status.toString()
+ << " instead. metadataOptions: "
+ << metadataOptions
+ << "; directoryPerDB: "
+ << directoryPerDB);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
index 535bc3fc447..f9725f6a104 100644
--- a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
@@ -36,8 +36,8 @@
#include "mongo/config.h"
#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/platform/bits.h"
-#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/clock_source.h"
+#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/processinfo.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index c0f2796ee05..7950922afd7 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -572,17 +572,17 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
if (_details->firstExtent(txn).isNull())
output->append("firstExtent", "null");
else
- output->append("firstExtent",
- str::stream()
- << _details->firstExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
+ output->append(
+ "firstExtent",
+ str::stream() << _details->firstExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
if (_details->lastExtent(txn).isNull())
output->append("lastExtent", "null");
else
- output->append("lastExtent",
- str::stream()
- << _details->lastExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
+ output->append(
+ "lastExtent",
+ str::stream() << _details->lastExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
@@ -784,9 +784,12 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
break;
}
- string err(str::stream()
- << "bad pointer in deleted record list: " << loc.toString()
- << " bucket: " << i << " k: " << k);
+ string err(str::stream() << "bad pointer in deleted record list: "
+ << loc.toString()
+ << " bucket: "
+ << i
+ << " k: "
+ << k);
results->errors.push_back(err);
results->valid = false;
break;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index 53b86129e13..489f084fffe 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -30,8 +30,8 @@
#pragma once
-#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/platform/unordered_set.h"
+#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/db/storage/mmap_v1/diskloc.h"
#include "mongo/db/storage/record_store.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 1c678074ace..7dab4124df6 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -101,11 +101,12 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// since we have to iterate all the extents (for now) to get
// storage size
if (lenToAlloc > storageSize(txn)) {
- return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
- mongoutils::str::stream()
- << "document is larger than capped size " << lenToAlloc
- << " > " << storageSize(txn),
- 16328);
+ return StatusWith<DiskLoc>(
+ ErrorCodes::DocTooLargeForCapped,
+ mongoutils::str::stream() << "document is larger than capped size " << lenToAlloc
+ << " > "
+ << storageSize(txn),
+ 16328);
}
}
DiskLoc loc;
@@ -161,8 +162,10 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
str::stream()
<< "document doesn't fit in capped collection."
- << " size: " << lenToAlloc
- << " storageSize:" << storageSize(txn),
+ << " size: "
+ << lenToAlloc
+ << " storageSize:"
+ << storageSize(txn),
28575);
}
continue;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index 5f20ba62385..2bde7396e44 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/mmap_v1/extent.h"
+#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_test_help.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
index a45cb1ca9e7..b65782cd27b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
@@ -30,8 +30,8 @@
#include <set>
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_base.h"
+#include "mongo/db/storage/record_store.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index aa29b7f0174..51baec1cd29 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -37,17 +37,17 @@
#include "mongo/base/counter.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/client.h"
-#include "mongo/db/curop.h"
#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
#include "mongo/db/storage/mmap_v1/record.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
-#include "mongo/util/progress_meter.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/progress_meter.h"
#include "mongo/util/timer.h"
#include "mongo/util/touch_pages.h"
@@ -152,7 +152,8 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(
ErrorCodes::InvalidLength,
str::stream() << "Attempting to allocate a record larger than maximum size: "
- << lengthWithHeaders << " > 16.5MB");
+ << lengthWithHeaders
+ << " > 16.5MB");
}
DiskLoc loc = _allocFromExistingExtents(txn, lengthWithHeaders);
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 234acf8695e..0c56ef9e6f1 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/storage/mmap_v1/record_store_v1_test_help.h"
-#include <boost/next_prior.hpp>
#include <algorithm>
+#include <boost/next_prior.hpp>
#include <map>
#include <set>
#include <vector>
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index b73d346a708..5587e4c2b9a 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -45,11 +45,11 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/storage/mmap_v1/dur.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/util/file.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -95,8 +95,7 @@ void _deleteDataFiles(const std::string& database) {
virtual const char* op() const {
return "remove";
}
- }
- deleter;
+ } deleter;
_applyOpToDataFiles(database, deleter, true);
}
@@ -290,9 +289,11 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
if (freeSize > -1 && freeSize < totalSize) {
return Status(ErrorCodes::OutOfDiskSpace,
- str::stream()
- << "Cannot repair database " << dbName << " having size: " << totalSize
- << " (bytes) because free disk space is: " << freeSize << " (bytes)");
+ str::stream() << "Cannot repair database " << dbName << " having size: "
+ << totalSize
+ << " (bytes) because free disk space is: "
+ << freeSize
+ << " (bytes)");
}
txn->checkForInterrupt();
diff --git a/src/mongo/db/storage/paths.cpp b/src/mongo/db/storage/paths.cpp
index 2f6fb4d4a77..b9f05ad17d1 100644
--- a/src/mongo/db/storage/paths.cpp
+++ b/src/mongo/db/storage/paths.cpp
@@ -84,8 +84,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650,
- str::stream() << "Couldn't open directory '" << dir.string()
- << "' for flushing: " << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
+ << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -102,8 +102,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(13651,
- str::stream() << "Couldn't fsync directory '" << dir.string()
- << "': " << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
+ << errnoWithDescription(e),
false);
}
}
diff --git a/src/mongo/db/storage/paths.h b/src/mongo/db/storage/paths.h
index 7f9a479f416..384b6459419 100644
--- a/src/mongo/db/storage/paths.h
+++ b/src/mongo/db/storage/paths.h
@@ -31,9 +31,9 @@
#pragma once
#include <boost/filesystem/path.hpp>
-#include <sys/types.h>
-#include <sys/stat.h>
#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index 21127d5342f..195f9e0a184 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -41,7 +41,7 @@ void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
// Dup key on end point. Illegal for unique indexes.
@@ -80,7 +80,7 @@ void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
// Dup key on end point. Illegal for unique indexes.
@@ -119,10 +119,10 @@ void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- // No key2
- {key3, loc1},
- {key4, loc1},
+ {key1, loc1},
+ // No key2
+ {key3, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -167,10 +167,10 @@ void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- {key2, loc1},
- // No key3
- {key4, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ // No key3
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -217,7 +217,7 @@ void testSetEndPosition_Restore_Forward(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -234,7 +234,7 @@ void testSetEndPosition_Restore_Forward(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1}, {key3, loc1},
});
cursor->restore();
@@ -253,7 +253,7 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -270,7 +270,7 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1}, {key3, loc1},
});
cursor->restore();
@@ -293,7 +293,7 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -306,8 +306,8 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
insertToIndex(opCtx,
sorted,
{
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
+ {key2, loc1}, // in range
+ {key3, loc1}, // out of range
});
cursor->restore();
@@ -327,7 +327,7 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -339,8 +339,8 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
insertToIndex(opCtx,
sorted,
{
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
+ {key2, loc1}, // in range
+ {key3, loc1}, // out of range
});
cursor->restore(); // must restore end cursor even with saveUnpositioned().
@@ -360,10 +360,11 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(BSONObj(), inclusive);
@@ -389,10 +390,11 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(BSONObj(), inclusive);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index d900dbdc1d7..63c3bf6bc44 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -291,7 +291,7 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -325,7 +325,7 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -365,7 +365,7 @@ void testSaveAndRestorePositionSeesNewInsertsAfterEOF(bool forward, bool unique)
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(false,
{
- {key1, loc1},
+ {key1, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -403,10 +403,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(/*isUnique*/ false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(/*isUnique*/ false,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
@@ -482,10 +483,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(/*isUnique*/ false,
- {
- {key0, loc1}, {key1, loc1}, {key2, loc2},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(/*isUnique*/ false,
+ {
+ {key0, loc1}, {key1, loc1}, {key2, loc2},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -561,10 +563,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev
TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(false,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index c767dbee859..ae22f28c52b 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -38,10 +38,11 @@ namespace mongo {
void testSeekExact_Hit(bool unique, bool forward) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -71,9 +72,9 @@ void testSeekExact_Miss(bool unique, bool forward) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- // No key2.
- {key3, loc1},
+ {key1, loc1},
+ // No key2.
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -105,7 +106,7 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
auto sorted = harnessHelper->newSortedDataInterface(
false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -124,7 +125,7 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
auto sorted = harnessHelper->newSortedDataInterface(
false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 8506af3e43e..40ce99a6911 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -529,10 +529,10 @@ TEST(SortedDataInterface, Locate4) {
auto harnessHelper = newHarnessHelper();
auto sorted = harnessHelper->newSortedDataInterface(false,
{
- {BSON("" << 1), RecordId(1, 2)},
- {BSON("" << 1), RecordId(1, 4)},
- {BSON("" << 1), RecordId(1, 6)},
- {BSON("" << 2), RecordId(1, 8)},
+ {BSON("" << 1), RecordId(1, 2)},
+ {BSON("" << 1), RecordId(1, 4)},
+ {BSON("" << 1), RecordId(1, 6)},
+ {BSON("" << 2), RecordId(1, 8)},
});
{
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index de9e0bb97aa..ab5f12484ca 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -35,11 +35,11 @@
#include <boost/filesystem.hpp>
#include <fcntl.h>
#include <ostream>
+#include <sstream>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <sstream>
#include "mongo/db/storage/paths.h"
#include "mongo/platform/process_id.h"
@@ -93,7 +93,8 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
// Use file permissions 644
@@ -151,7 +152,9 @@ Status StorageEngineLockFile::writePid() {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id to file (ftruncate failed): "
- << _filespec << ' ' << errnoWithDescription(errorcode));
+ << _filespec
+ << ' '
+ << errnoWithDescription(errorcode));
}
ProcessId pid = ProcessId::getCurrent();
@@ -163,20 +166,26 @@ Status StorageEngineLockFile::writePid() {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << ' '
+ << " to file: "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << " no data written.");
+ << " to file: "
+ << _filespec
+ << " no data written.");
}
if (::fsync(_lockFileHandle->_fd)) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file (fsync failed): " << _filespec << ' '
+ << " to file (fsync failed): "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
}
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index ee77676291a..e628c7a7ba2 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -38,9 +38,9 @@
#include "mongo/unittest/unittest.h"
#ifndef _WIN32
-#include <unistd.h>
-#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
#endif
namespace {
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index dfac9d024bd..0016f8f0873 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -109,7 +109,8 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
HANDLE lockFileHandle = CreateFileA(_filespec.c_str(),
@@ -170,12 +171,16 @@ Status StorageEngineLockFile::writePid() {
int errorcode = GetLastError();
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << ' '
+ << " to file: "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << " no data written.");
+ << " to file: "
+ << _filespec
+ << " no data written.");
}
::FlushFileBuffers(_lockFileHandle->_handle);
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 1a281e171e6..144ba7c838b 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/storage/storage_engine_metadata.h"
-#include <cstdio>
#include <boost/filesystem.hpp>
#include <boost/optional.hpp>
+#include <cstdio>
#include <fstream>
#include <limits>
#include <ostream>
@@ -158,16 +158,17 @@ Status StorageEngineMetadata::read() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unexpected error reading BSON data from " << filename
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
BSONObj obj;
try {
obj = BSONObj(&buffer[0]);
} catch (DBException& ex) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to convert data in " << filename
- << " to BSON: " << ex.what());
+ return Status(
+ ErrorCodes::FailedToParse,
+ str::stream() << "Failed to convert data in " << filename << " to BSON: " << ex.what());
}
// Validate 'storage.engine' field.
@@ -235,8 +236,11 @@ Status StorageEngineMetadata::write() const {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileRenameFailed,
str::stream() << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string() << " to " << metadataPath.string()
- << ": " << ex.what());
+ << metadataTempPath.string()
+ << " to "
+ << metadataPath.string()
+ << ": "
+ << ex.what());
}
return Status::OK();
@@ -252,7 +256,9 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData field
if (!element.isBoolean()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type()) << " instead: " << element);
+ << typeName(element.type())
+ << " instead: "
+ << element);
}
if (element.boolean() == expectedValue) {
return Status::OK();
@@ -260,9 +266,12 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData field
return Status(
ErrorCodes::InvalidOptions,
str::stream() << "Requested option conflicts with current storage engine option for "
- << fieldName << "; you requested " << (expectedValue ? "true" : "false")
+ << fieldName
+ << "; you requested "
+ << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
- << (element.boolean() ? "true" : "false") << " and cannot be changed");
+ << (element.boolean() ? "true" : "false")
+ << " and cannot be changed");
}
} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 0f0326a2161..466c0016037 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -36,8 +36,8 @@
#include <ostream>
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/db/json.h"
+#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/unittest/temp_dir.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 16cffe81ae9..0af1b78a602 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_options.h"
namespace mongo {
@@ -55,8 +55,10 @@ public:
virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
auto engine = txn->getClient()->getServiceContext()->getGlobalStorageEngine();
return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
- << bool(engine->getSnapshotManager()) << "readOnly"
- << storageGlobalParams.readOnly << "persistent"
+ << bool(engine->getSnapshotManager())
+ << "readOnly"
+ << storageGlobalParams.readOnly
+ << "persistent"
<< !engine->isEphemeral());
}
diff --git a/src/mongo/db/storage/storage_options.cpp b/src/mongo/db/storage/storage_options.cpp
index 9ea45c706b5..8b031ff7a59 100644
--- a/src/mongo/db/storage/storage_options.cpp
+++ b/src/mongo/db/storage/storage_options.cpp
@@ -85,7 +85,8 @@ public:
return Status(ErrorCodes::BadValue,
str::stream() << "journalCommitInterval must be between 1 and "
<< StorageGlobalParams::kMaxJournalCommitIntervalMs
- << ", but attempted to set to: " << potentialNewValue);
+ << ", but attempted to set to: "
+ << potentialNewValue);
}
return Status::OK();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
index fd16df07dcb..0de33e67ce5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
@@ -49,55 +49,63 @@ Status WiredTigerGlobalOptions::add(moe::OptionSection* options) {
moe::Double,
"maximum amount of memory to allocate for cache; "
"defaults to 1/2 of physical RAM");
- wiredTigerOptions.addOptionChaining(
- "storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
- "wiredTigerStatisticsLogDelaySecs",
- moe::Int,
- "seconds to wait between each write to a statistics file in the dbpath; "
- "0 means do not log statistics")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
+ "wiredTigerStatisticsLogDelaySecs",
+ moe::Int,
+ "seconds to wait between each write to a statistics file in the dbpath; "
+ "0 means do not log statistics")
.validRange(0, 100000)
.setDefault(moe::Value(0));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
- "wiredTigerJournalCompressor",
- moe::String,
- "use a compressor for log records [none|snappy|zlib]")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
+ "wiredTigerJournalCompressor",
+ moe::String,
+ "use a compressor for log records [none|snappy|zlib]")
.format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
.setDefault(moe::Value(std::string("snappy")));
wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.directoryForIndexes",
"wiredTigerDirectoryForIndexes",
moe::Switch,
"Put indexes and data in different directories");
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.configString",
- "wiredTigerEngineConfigString",
- moe::String,
- "WiredTiger storage engine custom "
- "configuration settings").hidden();
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.configString",
+ "wiredTigerEngineConfigString",
+ moe::String,
+ "WiredTiger storage engine custom "
+ "configuration settings")
+ .hidden();
// WiredTiger collection options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
- "wiredTigerCollectionBlockCompressor",
- moe::String,
- "block compression algorithm for collection data "
- "[none|snappy|zlib]")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
+ "wiredTigerCollectionBlockCompressor",
+ moe::String,
+ "block compression algorithm for collection data "
+ "[none|snappy|zlib]")
.format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
.setDefault(moe::Value(std::string("snappy")));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.configString",
- "wiredTigerCollectionConfigString",
- moe::String,
- "WiredTiger custom collection configuration settings")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.collectionConfig.configString",
+ "wiredTigerCollectionConfigString",
+ moe::String,
+ "WiredTiger custom collection configuration settings")
.hidden();
// WiredTiger index options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
- "wiredTigerIndexPrefixCompression",
- moe::Bool,
- "use prefix compression on row-store leaf pages")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
+ "wiredTigerIndexPrefixCompression",
+ moe::Bool,
+ "use prefix compression on row-store leaf pages")
.setDefault(moe::Value(true));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.configString",
- "wiredTigerIndexConfigString",
- moe::String,
- "WiredTiger custom index configuration settings").hidden();
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.indexConfig.configString",
+ "wiredTigerIndexConfigString",
+ moe::String,
+ "WiredTiger custom index configuration settings")
+ .hidden();
return options->addSection(wiredTigerOptions);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 7bc63bc7903..28b7f7d28fb 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -37,22 +37,22 @@
#include <set>
#include "mongo/base/checked_cast.h"
-#include "mongo/db/json.h"
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/json.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/key_string.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/hex.h"
#include "mongo/util/fail_point.h"
+#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -60,7 +60,7 @@
#if TRACING_ENABLED
#define TRACE_CURSOR log() << "WT index (" << (const void*)&_idx << ") "
-#define TRACE_INDEX log() << "WT index (" << (const void*) this << ") "
+#define TRACE_INDEX log() << "WT index (" << (const void*)this << ") "
#else
#define TRACE_CURSOR \
if (0) \
@@ -141,7 +141,8 @@ StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& option
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\'' << " is not a supported option.");
+ << '\''
+ << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index c2d6c70cf12..ee729502550 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -31,8 +31,8 @@
#include <wiredtiger.h>
#include "mongo/base/status_with.h"
-#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/index_entry_comparison.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
index 7cd4a095699..4173f8ed455 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
@@ -71,7 +71,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "ns" << ns);
+ << "ns"
+ << ns);
IndexDescriptor desc(NULL, "", spec);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
index fc60f082ea6..2d4db8a3123 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
@@ -33,20 +33,20 @@
#include "mongo/base/init.h"
#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/service_context_d.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
#include "mongo/db/storage/storage_engine_lock_file.h"
#include "mongo/db/storage/storage_engine_metadata.h"
-#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_index.h"
+#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_parameters.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_server_status.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index 4bf932ac276..a2830efebfd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -29,12 +29,12 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/json.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine_metadata.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
@@ -94,9 +94,15 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
- << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
- << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
+ << ErrorCodes::errorString(expectedCode)
+ << " but got "
+ << status.toString()
+ << " instead. metadataOptions: "
+ << metadataOptions
+ << "; directoryPerDB: "
+ << directoryPerDB
+ << "; directoryForIndexes: "
+ << directoryForIndexes);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 163654c12cb..e6c2875ec7c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -45,13 +45,14 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/client.h"
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/locker.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/journal_listener.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_index.h"
@@ -60,11 +61,10 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/log.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/exit.h"
+#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 49ef155b51d..cb7852d0bfc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -70,7 +70,8 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
return Status(ErrorCodes::BadValue,
(str::stream()
<< "WiredTiger configuration strings cannot have embedded null characters. "
- "Embedded null found at position " << pos));
+ "Embedded null found at position "
+ << pos));
}
log() << "Reconfiguring WiredTiger storage engine with config string: \"" << str << "\"";
@@ -79,7 +80,9 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
if (ret != 0) {
string result =
(mongoutils::str::stream() << "WiredTiger reconfiguration failed with error code ("
- << ret << "): " << wiredtiger_strerror(ret));
+ << ret
+ << "): "
+ << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index f31bbd1e38a..1543b5706f6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -488,7 +488,8 @@ public:
if (_forward && MONGO_FAIL_POINT(WTEmulateOutOfOrderNextRecordId)) {
log() << "WTEmulateOutOfOrderNextRecordId fail point has triggerd so RecordId is now "
- "RecordId(1) instead of " << id;
+ "RecordId(1) instead of "
+ << id;
// Replace the found RecordId with a (small) fake one.
id = RecordId{1};
}
@@ -649,7 +650,8 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\'' << " is not a supported option.");
+ << '\''
+ << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 37d64e8bd00..249dcacdc81 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -33,16 +33,16 @@
#include <sstream>
#include <string>
+#include "mongo/base/checked_cast.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/base/checked_cast.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/json.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/record_store_test_harness.h"
-#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h"
+#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 415081d334c..6f4bcf0d025 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -154,7 +154,9 @@ Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
if (keysSeen.count(key)) {
return Status(ErrorCodes::DuplicateKey,
str::stream() << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '" << key << "'.");
+ << "Found multiple instances of key '"
+ << key
+ << "'.");
}
keysSeen.insert(key);
@@ -230,7 +232,9 @@ Status WiredTigerUtil::checkApplicationMetadataFormatVersion(OperationContext* o
if (version < minimumVersion || version > maximumVersion) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Application metadata for " << uri
- << " has unsupported format version: " << version << ".");
+ << " has unsupported format version: "
+ << version
+ << ".");
}
LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
@@ -278,7 +282,8 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
if (ret != 0) {
return StatusWith<uint64_t>(ErrorCodes::CursorNotFound,
str::stream() << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
invariant(cursor);
ON_BLOCK_EXIT(cursor->close, cursor);
@@ -286,19 +291,21 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::NoSuchKey,
- str::stream() << "unable to find key " << statisticsKey
- << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(
+ ErrorCodes::NoSuchKey,
+ str::stream() << "unable to find key " << statisticsKey << " at URI " << uri
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
uint64_t value;
ret = cursor->get_value(cursor, NULL, NULL, &value);
if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::BadValue,
- str::stream() << "unable to get value for key " << statisticsKey
- << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(
+ ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey << " at URI " << uri
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
return StatusWith<uint64_t>(value);
@@ -437,8 +444,8 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
if (ret != 0) {
return Status(ErrorCodes::CursorNotFound,
- str::stream() << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ str::stream() << "unable to open cursor at URI " << uri << ". reason: "
+ << wiredtiger_strerror(ret));
}
bob->append("uri", uri);
invariant(c);
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 6c557364edf..85da961f66c 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -48,9 +48,9 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/delete.h"
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/ops/insert.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/ops/insert.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_parameters.h"
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 9d66a4a7a6e..2b1144d40a3 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
+#include "mongo/db/update_index_data.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/update_index_data.h"
namespace mongo {
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 3bde99387b7..e0a43779eea 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -36,11 +36,11 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_options.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/timer_stats.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/write_concern_options.h"
@@ -117,7 +117,8 @@ Status validateWriteConcern(OperationContext* txn,
ErrorCodes::BadValue,
str::stream()
<< "w:1 and w:'majority' are the only valid write concerns when writing to "
- "config servers, got: " << writeConcern.toBSON().toString());
+ "config servers, got: "
+ << writeConcern.toBSON().toString());
}
if (replMode == repl::ReplicationCoordinator::modeReplSet && !isLocalDb &&
@@ -127,7 +128,8 @@ Status validateWriteConcern(OperationContext* txn,
ErrorCodes::BadValue,
str::stream()
<< "w: 'majority' is the only valid write concern when writing to config "
- "server replica sets, got: " << writeConcern.toBSON().toString());
+ "server replica sets, got: "
+ << writeConcern.toBSON().toString());
}
}