Diffstat (limited to 'src/mongo/db')
-rw-r--r-- src/mongo/db/auth/authorization_manager_impl.cpp | 3
-rw-r--r-- src/mongo/db/auth/authorization_manager_test.cpp | 30
-rw-r--r-- src/mongo/db/auth/authorization_session_impl.cpp | 21
-rw-r--r-- src/mongo/db/auth/authorization_session_test.cpp | 103
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.cpp | 28
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_mock.cpp | 3
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_s.cpp | 55
-rw-r--r-- src/mongo/db/auth/privilege_parser_test.cpp | 27
-rw-r--r-- src/mongo/db/auth/role_graph.cpp | 42
-rw-r--r-- src/mongo/db/auth/role_graph_test.cpp | 63
-rw-r--r-- src/mongo/db/auth/role_graph_update.cpp | 21
-rw-r--r-- src/mongo/db/auth/sasl_authentication_session_test.cpp | 24
-rw-r--r-- src/mongo/db/auth/sasl_mechanism_registry.cpp | 7
-rw-r--r-- src/mongo/db/auth/sasl_mechanism_registry_test.cpp | 11
-rw-r--r-- src/mongo/db/auth/sasl_options_init.cpp | 2
-rw-r--r-- src/mongo/db/auth/sasl_plain_server_conversation.cpp | 5
-rw-r--r-- src/mongo/db/auth/sasl_plain_server_conversation.h | 5
-rw-r--r-- src/mongo/db/auth/sasl_scram_server_conversation.cpp | 15
-rw-r--r-- src/mongo/db/auth/sasl_scram_test.cpp | 17
-rw-r--r-- src/mongo/db/auth/security_file.cpp | 4
-rw-r--r-- src/mongo/db/auth/user.cpp | 2
-rw-r--r-- src/mongo/db/auth/user_document_parser.cpp | 8
-rw-r--r-- src/mongo/db/auth/user_document_parser_test.cpp | 102
-rw-r--r-- src/mongo/db/auth/user_management_commands_parser.cpp | 15
-rw-r--r-- src/mongo/db/baton.cpp | 2
-rw-r--r-- src/mongo/db/catalog/capped_utils.cpp | 14
-rw-r--r-- src/mongo/db/catalog/catalog_control.cpp | 7
-rw-r--r-- src/mongo/db/catalog/coll_mod.cpp | 17
-rw-r--r-- src/mongo/db/catalog/collection_catalog.h | 5
-rw-r--r-- src/mongo/db/catalog/collection_catalog_test.cpp | 2
-rw-r--r-- src/mongo/db/catalog/collection_compact.cpp | 4
-rw-r--r-- src/mongo/db/catalog/collection_compact.h | 6
-rw-r--r-- src/mongo/db/catalog/collection_impl.cpp | 45
-rw-r--r-- src/mongo/db/catalog/collection_options.cpp | 8
-rw-r--r-- src/mongo/db/catalog/collection_options.h | 2
-rw-r--r-- src/mongo/db/catalog/create_collection.cpp | 9
-rw-r--r-- src/mongo/db/catalog/database_holder_impl.cpp | 8
-rw-r--r-- src/mongo/db/catalog/database_impl.cpp | 21
-rw-r--r-- src/mongo/db/catalog/database_test.cpp | 73
-rw-r--r-- src/mongo/db/catalog/document_validation.h | 2
-rw-r--r-- src/mongo/db/catalog/drop_database.cpp | 25
-rw-r--r-- src/mongo/db/catalog/drop_database_test.cpp | 8
-rw-r--r-- src/mongo/db/catalog/drop_indexes.cpp | 33
-rw-r--r-- src/mongo/db/catalog/health_log.cpp | 4
-rw-r--r-- src/mongo/db/catalog/health_log.h | 2
-rw-r--r-- src/mongo/db/catalog/index_build_block.cpp | 4
-rw-r--r-- src/mongo/db/catalog/index_builds_manager.cpp | 3
-rw-r--r-- src/mongo/db/catalog/index_builds_manager_test.cpp | 3
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry_impl.cpp | 12
-rw-r--r-- src/mongo/db/catalog/index_catalog_impl.cpp | 87
-rw-r--r-- src/mongo/db/catalog/index_consistency.cpp | 3
-rw-r--r-- src/mongo/db/catalog/index_key_validate.cpp | 74
-rw-r--r-- src/mongo/db/catalog/index_key_validate_test.cpp | 6
-rw-r--r-- src/mongo/db/catalog/index_spec_validate_test.cpp | 379
-rw-r--r-- src/mongo/db/catalog/index_timestamp_helper.h | 4
-rw-r--r-- src/mongo/db/catalog/multi_index_block.cpp | 15
-rw-r--r-- src/mongo/db/catalog/private/record_store_validate_adaptor.cpp | 14
-rw-r--r-- src/mongo/db/catalog/private/record_store_validate_adaptor.h | 2
-rw-r--r-- src/mongo/db/catalog/rename_collection.cpp | 29
-rw-r--r-- src/mongo/db/catalog/rename_collection_test.cpp | 37
-rw-r--r-- src/mongo/db/catalog/util/partitioned.h | 2
-rw-r--r-- src/mongo/db/catalog/util/partitioned_test.cpp | 1
-rw-r--r-- src/mongo/db/catalog_raii.cpp | 6
-rw-r--r-- src/mongo/db/client.cpp | 4
-rw-r--r-- src/mongo/db/clientcursor.cpp | 2
-rw-r--r-- src/mongo/db/cloner.cpp | 43
-rw-r--r-- src/mongo/db/commands/clone_collection.cpp | 4
-rw-r--r-- src/mongo/db/commands/collection_to_capped.cpp | 3
-rw-r--r-- src/mongo/db/commands/compact.cpp | 2
-rw-r--r-- src/mongo/db/commands/connection_status.cpp | 2
-rw-r--r-- src/mongo/db/commands/count_cmd.cpp | 2
-rw-r--r-- src/mongo/db/commands/create_indexes.cpp | 29
-rw-r--r-- src/mongo/db/commands/dbcheck.cpp | 4
-rw-r--r-- src/mongo/db/commands/dbcommands.cpp | 4
-rw-r--r-- src/mongo/db/commands/dbcommands_d.cpp | 8
-rw-r--r-- src/mongo/db/commands/dbhash.cpp | 9
-rw-r--r-- src/mongo/db/commands/do_txn_cmd.cpp | 4
-rw-r--r-- src/mongo/db/commands/driverHelpers.cpp | 2
-rw-r--r-- src/mongo/db/commands/drop_indexes.cpp | 2
-rw-r--r-- src/mongo/db/commands/explain_cmd.cpp | 3
-rw-r--r-- src/mongo/db/commands/fail_point_cmd.cpp | 2
-rw-r--r-- src/mongo/db/commands/feature_compatibility_version.cpp | 9
-rw-r--r-- src/mongo/db/commands/feature_compatibility_version_command_parser.cpp | 20
-rw-r--r-- src/mongo/db/commands/feature_compatibility_version_documentation.h | 4
-rw-r--r-- src/mongo/db/commands/feature_compatibility_version_parser.cpp | 75
-rw-r--r-- src/mongo/db/commands/find_cmd.cpp | 10
-rw-r--r-- src/mongo/db/commands/fsync.cpp | 4
-rw-r--r-- src/mongo/db/commands/fsync_locked.h | 10
-rw-r--r-- src/mongo/db/commands/generic_servers.cpp | 4
-rw-r--r-- src/mongo/db/commands/getmore_cmd.cpp | 22
-rw-r--r-- src/mongo/db/commands/hashcmd.cpp | 2
-rw-r--r-- src/mongo/db/commands/index_filter_commands.cpp | 2
-rw-r--r-- src/mongo/db/commands/list_databases.cpp | 2
-rw-r--r-- src/mongo/db/commands/list_indexes.cpp | 4
-rw-r--r-- src/mongo/db/commands/lock_info.cpp | 2
-rw-r--r-- src/mongo/db/commands/mr.cpp | 22
-rw-r--r-- src/mongo/db/commands/mr.h | 16
-rw-r--r-- src/mongo/db/commands/mr_common.cpp | 4
-rw-r--r-- src/mongo/db/commands/mr_test.cpp | 6
-rw-r--r-- src/mongo/db/commands/parameters.cpp | 17
-rw-r--r-- src/mongo/db/commands/plan_cache_commands.cpp | 2
-rw-r--r-- src/mongo/db/commands/plan_cache_commands_test.cpp | 25
-rw-r--r-- src/mongo/db/commands/repair_cursor.cpp | 2
-rw-r--r-- src/mongo/db/commands/run_aggregate.cpp | 4
-rw-r--r-- src/mongo/db/commands/server_status_internal.cpp | 2
-rw-r--r-- src/mongo/db/commands/server_status_internal.h | 2
-rw-r--r-- src/mongo/db/commands/server_status_metric.cpp | 2
-rw-r--r-- src/mongo/db/commands/server_status_metric.h | 2
-rw-r--r-- src/mongo/db/commands/sleep_command.cpp | 2
-rw-r--r-- src/mongo/db/commands/snapshot_management.cpp | 2
-rw-r--r-- src/mongo/db/commands/test_commands.cpp | 2
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp | 80
-rw-r--r-- src/mongo/db/commands/user_management_commands_common.cpp | 35
-rw-r--r-- src/mongo/db/commands/validate.cpp | 2
-rw-r--r-- src/mongo/db/commands/write_commands/write_commands.cpp | 2
-rw-r--r-- src/mongo/db/concurrency/d_concurrency_bm.cpp | 4
-rw-r--r-- src/mongo/db/concurrency/d_concurrency_test.cpp | 2
-rw-r--r-- src/mongo/db/concurrency/lock_manager.cpp | 5
-rw-r--r-- src/mongo/db/concurrency/lock_manager.h | 52
-rw-r--r-- src/mongo/db/concurrency/lock_state.cpp | 9
-rw-r--r-- src/mongo/db/concurrency/lock_state_test.cpp | 11
-rw-r--r-- src/mongo/db/concurrency/write_conflict_exception.cpp | 12
-rw-r--r-- src/mongo/db/curop.cpp | 9
-rw-r--r-- src/mongo/db/curop_failpoint_helpers.cpp | 2
-rw-r--r-- src/mongo/db/curop_failpoint_helpers.h | 2
-rw-r--r-- src/mongo/db/db.cpp | 4
-rw-r--r-- src/mongo/db/db_raii.cpp | 3
-rw-r--r-- src/mongo/db/dbdirectclient.cpp | 2
-rw-r--r-- src/mongo/db/dbhelpers.cpp | 2
-rw-r--r-- src/mongo/db/dbmessage.cpp | 4
-rw-r--r-- src/mongo/db/dbmessage.h | 6
-rw-r--r-- src/mongo/db/dbmessage_test.cpp | 2
-rw-r--r-- src/mongo/db/exec/and_sorted.cpp | 2
-rw-r--r-- src/mongo/db/exec/change_stream_proxy.cpp | 3
-rw-r--r-- src/mongo/db/exec/collection_scan.cpp | 6
-rw-r--r-- src/mongo/db/exec/count_scan.cpp | 2
-rw-r--r-- src/mongo/db/exec/geo_near.cpp | 18
-rw-r--r-- src/mongo/db/exec/queued_data_stage_test.cpp | 2
-rw-r--r-- src/mongo/db/exec/record_store_fast_count.h | 2
-rw-r--r-- src/mongo/db/exec/requires_collection_stage.cpp | 3
-rw-r--r-- src/mongo/db/exec/stagedebug_cmd.cpp | 10
-rw-r--r-- src/mongo/db/exec/text_or.cpp | 2
-rw-r--r-- src/mongo/db/exec/update_stage.cpp | 3
-rw-r--r-- src/mongo/db/exec/write_stage_common.h | 4
-rw-r--r-- src/mongo/db/exhaust_cursor_currentop_integration_test.cpp | 14
-rw-r--r-- src/mongo/db/field_parser_test.cpp | 76
-rw-r--r-- src/mongo/db/field_ref_set.cpp | 4
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller.h | 32
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller_test.cpp | 201
-rw-r--r-- src/mongo/db/free_mon/free_mon_message.h | 24
-rw-r--r-- src/mongo/db/free_mon/free_mon_mongod.cpp | 31
-rw-r--r-- src/mongo/db/free_mon/free_mon_op_observer.cpp | 5
-rw-r--r-- src/mongo/db/free_mon/free_mon_options.h | 4
-rw-r--r-- src/mongo/db/free_mon/free_mon_processor.cpp | 60
-rw-r--r-- src/mongo/db/free_mon/free_mon_queue_test.cpp | 2
-rw-r--r-- src/mongo/db/ftdc/compressor_test.cpp | 223
-rw-r--r-- src/mongo/db/ftdc/controller.h | 16
-rw-r--r-- src/mongo/db/ftdc/controller_test.cpp | 4
-rw-r--r-- src/mongo/db/ftdc/file_manager.cpp | 10
-rw-r--r-- src/mongo/db/ftdc/file_manager_test.cpp | 92
-rw-r--r-- src/mongo/db/ftdc/file_reader.cpp | 3
-rw-r--r-- src/mongo/db/ftdc/file_writer.cpp | 3
-rw-r--r-- src/mongo/db/ftdc/file_writer_test.cpp | 86
-rw-r--r-- src/mongo/db/ftdc/ftdc_system_stats.h | 1
-rw-r--r-- src/mongo/db/ftdc/ftdc_system_stats_linux.cpp | 5
-rw-r--r-- src/mongo/db/ftdc/util.cpp | 4
-rw-r--r-- src/mongo/db/ftdc/util.h | 24
-rw-r--r-- src/mongo/db/ftdc/varint.h | 4
-rw-r--r-- src/mongo/db/fts/fts_element_iterator.cpp | 9
-rw-r--r-- src/mongo/db/fts/fts_index_format.cpp | 16
-rw-r--r-- src/mongo/db/fts/fts_index_format.h | 4
-rw-r--r-- src/mongo/db/fts/fts_index_format_test.cpp | 18
-rw-r--r-- src/mongo/db/fts/fts_language.cpp | 14
-rw-r--r-- src/mongo/db/fts/fts_language.h | 4
-rw-r--r-- src/mongo/db/fts/fts_language_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_matcher.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_matcher.h | 4
-rw-r--r-- src/mongo/db/fts/fts_matcher_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_query_impl.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_query_impl.h | 4
-rw-r--r-- src/mongo/db/fts/fts_query_impl_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_query_parser.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_query_parser.h | 4
-rw-r--r-- src/mongo/db/fts/fts_spec.cpp | 22
-rw-r--r-- src/mongo/db/fts/fts_spec_legacy.cpp | 10
-rw-r--r-- src/mongo/db/fts/fts_spec_test.cpp | 22
-rw-r--r-- src/mongo/db/fts/fts_util.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_util.h | 4
-rw-r--r-- src/mongo/db/fts/stemmer.cpp | 4
-rw-r--r-- src/mongo/db/fts/stemmer.h | 4
-rw-r--r-- src/mongo/db/fts/stemmer_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/stop_words.cpp | 6
-rw-r--r-- src/mongo/db/fts/stop_words.h | 4
-rw-r--r-- src/mongo/db/fts/stop_words_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/tokenizer.cpp | 4
-rw-r--r-- src/mongo/db/fts/tokenizer.h | 4
-rw-r--r-- src/mongo/db/fts/tokenizer_test.cpp | 4
-rw-r--r-- src/mongo/db/fts/unicode/string.cpp | 2
-rw-r--r-- src/mongo/db/fts/unicode/string_test.cpp | 2
-rw-r--r-- src/mongo/db/geo/big_polygon.cpp | 2
-rw-r--r-- src/mongo/db/geo/big_polygon.h | 2
-rw-r--r-- src/mongo/db/geo/big_polygon_test.cpp | 169
-rw-r--r-- src/mongo/db/geo/geometry_container.cpp | 5
-rw-r--r-- src/mongo/db/geo/geoparser.cpp | 3
-rw-r--r-- src/mongo/db/geo/geoparser_test.cpp | 2
-rw-r--r-- src/mongo/db/geo/hash.cpp | 13
-rw-r--r-- src/mongo/db/geo/hash_test.cpp | 2
-rw-r--r-- src/mongo/db/geo/r2_region_coverer.cpp | 2
-rw-r--r-- src/mongo/db/geo/shapes.h | 5
-rw-r--r-- src/mongo/db/hasher.h | 2
-rw-r--r-- src/mongo/db/hasher_test.cpp | 3
-rw-r--r-- src/mongo/db/index/btree_key_generator.cpp | 4
-rw-r--r-- src/mongo/db/index/btree_key_generator_test.cpp | 2
-rw-r--r-- src/mongo/db/index/expression_params.cpp | 10
-rw-r--r-- src/mongo/db/index/index_access_method.cpp | 4
-rw-r--r-- src/mongo/db/index/index_build_interceptor.cpp | 10
-rw-r--r-- src/mongo/db/index/index_build_interceptor.h | 6
-rw-r--r-- src/mongo/db/index/index_descriptor.cpp | 5
-rw-r--r-- src/mongo/db/index/s2_access_method.cpp | 36
-rw-r--r-- src/mongo/db/index/s2_key_generator_test.cpp | 21
-rw-r--r-- src/mongo/db/index/sort_key_generator_test.cpp | 3
-rw-r--r-- src/mongo/db/index_builder.h | 2
-rw-r--r-- src/mongo/db/index_builds_coordinator.cpp | 42
-rw-r--r-- src/mongo/db/index_builds_coordinator_mongod.cpp | 10
-rw-r--r-- src/mongo/db/index_builds_coordinator_mongod_test.cpp | 3
-rw-r--r-- src/mongo/db/initialize_server_global_state.cpp | 10
-rw-r--r-- src/mongo/db/initialize_server_security_state.cpp | 4
-rw-r--r-- src/mongo/db/introspect.cpp | 2
-rw-r--r-- src/mongo/db/keypattern.cpp | 3
-rw-r--r-- src/mongo/db/keypattern_test.cpp | 2
-rw-r--r-- src/mongo/db/keys_collection_cache.cpp | 6
-rw-r--r-- src/mongo/db/keys_collection_client.h | 4
-rw-r--r-- src/mongo/db/keys_collection_client_direct.h | 4
-rw-r--r-- src/mongo/db/keys_collection_client_sharded.h | 4
-rw-r--r-- src/mongo/db/log_process_details.cpp | 2
-rw-r--r-- src/mongo/db/logical_clock.cpp | 5
-rw-r--r-- src/mongo/db/logical_session_cache_test.cpp | 5
-rw-r--r-- src/mongo/db/logical_session_id_test.cpp | 48
-rw-r--r-- src/mongo/db/logical_time_test.cpp | 10
-rw-r--r-- src/mongo/db/matcher/expression.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_array.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_array.h | 6
-rw-r--r-- src/mongo/db/matcher/expression_geo.cpp | 21
-rw-r--r-- src/mongo/db/matcher/expression_geo_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_parser.cpp | 45
-rw-r--r-- src/mongo/db/matcher/expression_parser_array_test.cpp | 154
-rw-r--r-- src/mongo/db/matcher/expression_parser_leaf_test.cpp | 163
-rw-r--r-- src/mongo/db/matcher/expression_parser_test.cpp | 9
-rw-r--r-- src/mongo/db/matcher/expression_parser_tree_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_text.cpp | 6
-rw-r--r-- src/mongo/db/matcher/expression_text_base.cpp | 6
-rw-r--r-- src/mongo/db/matcher/expression_tree.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_tree.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_tree_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_type_test.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_where.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_where_noop.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_with_placeholder.cpp | 11
-rw-r--r-- src/mongo/db/matcher/match_details.cpp | 2
-rw-r--r-- src/mongo/db/matcher/match_details.h | 2
-rw-r--r-- src/mongo/db/matcher/matchable.cpp | 2
-rw-r--r-- src/mongo/db/matcher/matchable.h | 4
-rw-r--r-- src/mongo/db/matcher/path.cpp | 2
-rw-r--r-- src/mongo/db/matcher/path.h | 2
-rw-r--r-- src/mongo/db/matcher/path_accepting_keyword_test.cpp | 45
-rw-r--r-- src/mongo/db/matcher/path_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/schema/expression_internal_schema_max_length.h | 4
-rw-r--r-- src/mongo/db/matcher/schema/expression_internal_schema_min_length.h | 4
-rw-r--r-- src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp | 3
-rw-r--r-- src/mongo/db/matcher/schema/json_pointer_test.cpp | 10
-rw-r--r-- src/mongo/db/matcher/schema/json_schema_parser.cpp | 159
-rw-r--r-- src/mongo/db/mongod_options.cpp | 9
-rw-r--r-- src/mongo/db/mongod_options.h | 2
-rw-r--r-- src/mongo/db/multi_key_path_tracker.cpp | 4
-rw-r--r-- src/mongo/db/multi_key_path_tracker_test.cpp | 3
-rw-r--r-- src/mongo/db/namespace_string.cpp | 4
-rw-r--r-- src/mongo/db/op_observer_impl.cpp | 8
-rw-r--r-- src/mongo/db/op_observer_impl_test.cpp | 528
-rw-r--r-- src/mongo/db/op_observer_util.h | 2
-rw-r--r-- src/mongo/db/operation_time_tracker.cpp | 2
-rw-r--r-- src/mongo/db/ops/delete.h | 2
-rw-r--r-- src/mongo/db/ops/insert.cpp | 22
-rw-r--r-- src/mongo/db/ops/insert.h | 2
-rw-r--r-- src/mongo/db/ops/update.cpp | 3
-rw-r--r-- src/mongo/db/ops/write_ops_exec.cpp | 8
-rw-r--r-- src/mongo/db/ops/write_ops_parsers.cpp | 9
-rw-r--r-- src/mongo/db/ops/write_ops_parsers_test.cpp | 80
-rw-r--r-- src/mongo/db/ops/write_ops_retryability.cpp | 41
-rw-r--r-- src/mongo/db/ops/write_ops_retryability_test.cpp | 45
-rw-r--r-- src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp | 18
-rw-r--r-- src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp | 20
-rw-r--r-- src/mongo/db/pipeline/accumulator.h | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_avg.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_first.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_last.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_merge_objects.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/accumulator_min_max.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_push.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_std_dev.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/aggregation_request.cpp | 16
-rw-r--r-- src/mongo/db/pipeline/dependencies.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/dependencies.h | 2
-rw-r--r-- src/mongo/db/pipeline/dependencies_test.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document.cpp | 5
-rw-r--r-- src/mongo/db/pipeline/document.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_internal.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_add_fields.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket.cpp | 39
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_auto_test.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_change_stream.cpp | 18
-rw-r--r-- src/mongo/db/pipeline/document_source_change_stream_test.cpp | 29
-rw-r--r-- src/mongo/db/pipeline/document_source_coll_stats.cpp | 17
-rw-r--r-- src/mongo/db/pipeline/document_source_current_op.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_current_op.h | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_cursor.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_exchange.cpp | 12
-rw-r--r-- src/mongo/db/pipeline/document_source_exchange_test.cpp | 84
-rw-r--r-- src/mongo/db/pipeline/document_source_facet.cpp | 10
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup.cpp | 25
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup_test.cpp | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_group_test.cpp | 32
-rw-r--r-- src/mongo/db/pipeline/document_source_index_stats.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_internal_split_pipeline.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_list_cached_and_active_users.h | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_list_local_sessions.h | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup.cpp | 13
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp | 17
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup_test.cpp | 137
-rw-r--r-- src/mongo/db/pipeline/document_source_match.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_merge.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_merge.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_merge_cursors_test.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_merge_test.cpp | 199
-rw-r--r-- src/mongo/db/pipeline/document_source_mock.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_out.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_plan_cache_stats.cpp | 12
-rw-r--r-- src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_queue.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_redact.cpp | 5
-rw-r--r-- src/mongo/db/pipeline/document_source_replace_root.cpp | 10
-rw-r--r-- src/mongo/db/pipeline/document_source_replace_root_test.cpp | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_sequential_document_cache.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_sequential_document_cache.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_skip.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind_test.cpp | 23
-rw-r--r-- src/mongo/db/pipeline/document_source_writer.h | 2
-rw-r--r-- src/mongo/db/pipeline/expression.cpp | 284
-rw-r--r-- src/mongo/db/pipeline/expression.h | 22
-rw-r--r-- src/mongo/db/pipeline/expression_convert_test.cpp | 40
-rw-r--r-- src/mongo/db/pipeline/expression_date_test.cpp | 129
-rw-r--r-- src/mongo/db/pipeline/expression_test.cpp | 194
-rw-r--r-- src/mongo/db/pipeline/expression_trigonometric.h | 8
-rw-r--r-- src/mongo/db/pipeline/expression_trigonometric_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/field_path.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/field_path.h | 2
-rw-r--r-- src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp | 140
-rw-r--r-- src/mongo/db/pipeline/lite_parsed_document_source.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/lite_parsed_pipeline.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/lookup_set_cache.h | 4
-rw-r--r-- src/mongo/db/pipeline/mongos_process_interface.cpp | 9
-rw-r--r-- src/mongo/db/pipeline/parsed_aggregation_projection.cpp | 41
-rw-r--r-- src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp | 38
-rw-r--r-- src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp | 21
-rw-r--r-- src/mongo/db/pipeline/pipeline.cpp | 6
-rw-r--r-- src/mongo/db/pipeline/pipeline_d.cpp | 23
-rw-r--r-- src/mongo/db/pipeline/pipeline_metadata_tree.h | 17
-rw-r--r-- src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp | 15
-rw-r--r-- src/mongo/db/pipeline/process_interface_standalone.cpp | 27
-rw-r--r-- src/mongo/db/pipeline/process_interface_standalone_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/resume_token.cpp | 5
-rw-r--r-- src/mongo/db/pipeline/resume_token_test.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/sharded_agg_helpers.cpp | 13
-rw-r--r-- src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/value.cpp | 14
-rw-r--r-- src/mongo/db/pipeline/value.h | 4
-rw-r--r-- src/mongo/db/pipeline/variables.cpp | 10
-rw-r--r-- src/mongo/db/pipeline/variables.h | 2
-rw-r--r-- src/mongo/db/query/canonical_query_encoder.cpp | 20
-rw-r--r-- src/mongo/db/query/canonical_query_encoder.h | 4
-rw-r--r-- src/mongo/db/query/collation/collation_index_key.cpp | 6
-rw-r--r-- src/mongo/db/query/collation/collation_index_key_test.cpp | 3
-rw-r--r-- src/mongo/db/query/collation/collation_spec_test.cpp | 144
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu.cpp | 155
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu_decoration.cpp | 4
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu_test.cpp | 176
-rw-r--r-- src/mongo/db/query/collation/collator_interface_mock_test.cpp | 10
-rw-r--r-- src/mongo/db/query/count_command_test.cpp | 79
-rw-r--r-- src/mongo/db/query/cursor_response.cpp | 18
-rw-r--r-- src/mongo/db/query/cursor_response_test.cpp | 183
-rw-r--r-- src/mongo/db/query/datetime/date_time_support.cpp | 10
-rw-r--r-- src/mongo/db/query/datetime/date_time_support.h | 3
-rw-r--r-- src/mongo/db/query/datetime/init_timezone_data.cpp | 3
-rw-r--r-- src/mongo/db/query/explain.h | 2
-rw-r--r-- src/mongo/db/query/explain_options.cpp | 11
-rw-r--r-- src/mongo/db/query/find.cpp | 3
-rw-r--r-- src/mongo/db/query/find_and_modify_request.cpp | 18
-rw-r--r-- src/mongo/db/query/find_and_modify_request.h | 14
-rw-r--r-- src/mongo/db/query/get_executor.cpp | 14
-rw-r--r-- src/mongo/db/query/get_executor_test.cpp | 15
-rw-r--r-- src/mongo/db/query/getmore_request.cpp | 11
-rw-r--r-- src/mongo/db/query/getmore_request_test.cpp | 45
-rw-r--r-- src/mongo/db/query/killcursors_request.cpp | 4
-rw-r--r-- src/mongo/db/query/killcursors_request_test.cpp | 21
-rw-r--r-- src/mongo/db/query/killcursors_response.cpp | 4
-rw-r--r-- src/mongo/db/query/killcursors_response_test.cpp | 42
-rw-r--r-- src/mongo/db/query/parsed_distinct.cpp | 18
-rw-r--r-- src/mongo/db/query/parsed_distinct_test.cpp | 50
-rw-r--r-- src/mongo/db/query/parsed_projection.cpp | 8
-rw-r--r-- src/mongo/db/query/parsed_projection_test.cpp | 5
-rw-r--r-- src/mongo/db/query/plan_cache_indexability.cpp | 2
-rw-r--r-- src/mongo/db/query/plan_cache_indexability_test.cpp | 4
-rw-r--r-- src/mongo/db/query/plan_cache_test.cpp | 3
-rw-r--r-- src/mongo/db/query/plan_enumerator.cpp | 14
-rw-r--r-- src/mongo/db/query/planner_analysis.cpp | 2
-rw-r--r-- src/mongo/db/query/planner_ixselect.cpp | 15
-rw-r--r-- src/mongo/db/query/planner_ixselect_test.cpp | 10
-rw-r--r-- src/mongo/db/query/query_planner.cpp | 15
-rw-r--r-- src/mongo/db/query/query_planner_geo_test.cpp | 80
-rw-r--r-- src/mongo/db/query/query_planner_test.cpp | 17
-rw-r--r-- src/mongo/db/query/query_planner_test_fixture.cpp | 4
-rw-r--r-- src/mongo/db/query/query_planner_text_test.cpp | 113
-rw-r--r-- src/mongo/db/query/query_planner_wildcard_index_test.cpp | 3
-rw-r--r-- src/mongo/db/query/query_request.cpp | 20
-rw-r--r-- src/mongo/db/query/query_request_test.cpp | 2
-rw-r--r-- src/mongo/db/query/query_settings_test.cpp | 4
-rw-r--r-- src/mongo/db/query/query_solution.cpp | 2
-rw-r--r-- src/mongo/db/query/query_solution_test.cpp | 3
-rw-r--r-- src/mongo/db/query/stage_builder.cpp | 7
-rw-r--r-- src/mongo/db/read_concern.h | 2
-rw-r--r-- src/mongo/db/read_concern_mongod.cpp | 12
-rw-r--r-- src/mongo/db/read_concern_test.cpp | 4
-rw-r--r-- src/mongo/db/repair_database.cpp | 7
-rw-r--r-- src/mongo/db/repair_database_and_check_version.cpp | 20
-rw-r--r-- src/mongo/db/repl/abstract_async_component.cpp | 13
-rw-r--r-- src/mongo/db/repl/abstract_async_component.h | 3
-rw-r--r-- src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp | 2
-rw-r--r-- src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h | 2
-rw-r--r-- src/mongo/db/repl/applier_helpers.cpp | 3
-rw-r--r-- src/mongo/db/repl/apply_ops.cpp | 11
-rw-r--r-- src/mongo/db/repl/apply_ops.h | 2
-rw-r--r-- src/mongo/db/repl/apply_ops_test.cpp | 83
-rw-r--r-- src/mongo/db/repl/base_cloner_test_fixture.cpp | 3
-rw-r--r-- src/mongo/db/repl/bgsync.h | 22
-rw-r--r-- src/mongo/db/repl/check_quorum_for_config_change.cpp | 5
-rw-r--r-- src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 241
-rw-r--r-- src/mongo/db/repl/collection_bulk_loader_impl.cpp | 65
-rw-r--r-- src/mongo/db/repl/collection_cloner.cpp | 11
-rw-r--r-- src/mongo/db/repl/collection_cloner_test.cpp | 68
-rw-r--r-- src/mongo/db/repl/database_cloner.cpp | 43
-rw-r--r-- src/mongo/db/repl/database_cloner_test.cpp | 111
-rw-r--r-- src/mongo/db/repl/databases_cloner_test.cpp | 68
-rw-r--r-- src/mongo/db/repl/dbcheck.cpp | 32
-rw-r--r-- src/mongo/db/repl/dbcheck.h | 4
-rw-r--r-- src/mongo/db/repl/dbcheck_idl.h | 2
-rw-r--r-- src/mongo/db/repl/do_txn.cpp | 8
-rw-r--r-- src/mongo/db/repl/do_txn_test.cpp | 12
-rw-r--r-- src/mongo/db/repl/drop_pending_collection_reaper_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/idempotency_test_fixture.cpp | 7
-rw-r--r-- src/mongo/db/repl/initial_syncer.cpp | 28
-rw-r--r-- src/mongo/db/repl/initial_syncer_test.cpp | 94
-rw-r--r-- src/mongo/db/repl/is_master_response.cpp | 27
-rw-r--r-- src/mongo/db/repl/isself.cpp | 3
-rw-r--r-- src/mongo/db/repl/member_config.cpp | 12
-rw-r--r-- src/mongo/db/repl/member_config_test.cpp | 180
-rw-r--r-- src/mongo/db/repl/member_data.cpp | 5
-rw-r--r-- src/mongo/db/repl/mock_repl_coord_server_fixture.h | 2
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 27
-rw-r--r-- src/mongo/db/repl/oplog_buffer_collection.cpp | 9
-rw-r--r-- src/mongo/db/repl/oplog_buffer_collection_test.cpp | 41
-rw-r--r-- src/mongo/db/repl/oplog_entry.cpp | 3
-rw-r--r-- src/mongo/db/repl/oplog_fetcher.cpp | 28
-rw-r--r-- src/mongo/db/repl/oplog_interface_mock.cpp | 3
-rw-r--r-- src/mongo/db/repl/oplog_test.cpp | 6
-rw-r--r-- src/mongo/db/repl/optime_extract_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/read_concern_args.cpp | 38
-rw-r--r-- src/mongo/db/repl/read_concern_args_test.cpp | 323
-rw-r--r-- src/mongo/db/repl/repl_set_config.cpp | 112
-rw-r--r-- src/mongo/db/repl/repl_set_config_checks.cpp | 47
-rw-r--r-- src/mongo/db/repl/repl_set_config_checks_test.cpp | 432
-rw-r--r-- src/mongo/db/repl/repl_set_config_test.cpp | 1272
-rw-r--r-- src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp | 7
-rw-r--r-- src/mongo/db/repl/repl_set_heartbeat_response.cpp | 15
-rw-r--r-- src/mongo/db/repl/repl_set_heartbeat_response_test.cpp | 128
-rw-r--r-- src/mongo/db/repl/replication_consistency_markers_impl.cpp | 21
-rw-r--r-- src/mongo/db/repl/replication_coordinator.h | 12
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 22
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.h | 10
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp | 12
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.h | 2
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp | 371
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 10
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp | 72
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp | 131
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_test.cpp | 1515
-rw-r--r-- src/mongo/db/repl/replication_coordinator_test_fixture.cpp | 30
-rw-r--r-- src/mongo/db/repl/replication_info.cpp | 5
-rw-r--r-- src/mongo/db/repl/replication_recovery.cpp | 3
-rw-r--r-- src/mongo/db/repl/replication_recovery_test.cpp | 8
-rw-r--r-- src/mongo/db/repl/reporter_test.cpp | 9
-rw-r--r-- src/mongo/db/repl/roll_back_local_operations.cpp | 20
-rw-r--r-- src/mongo/db/repl/roll_back_local_operations_test.cpp | 31
-rw-r--r-- src/mongo/db/repl/rollback_impl.cpp | 39
-rw-r--r-- src/mongo/db/repl/rollback_impl.h | 2
-rw-r--r-- src/mongo/db/repl/rollback_impl_test.cpp | 92
-rw-r--r-- src/mongo/db/repl/rollback_source_impl.cpp | 4
-rw-r--r-- src/mongo/db/repl/rollback_test_fixture.cpp | 7
-rw-r--r-- src/mongo/db/repl/rs_rollback.cpp | 48
-rw-r--r-- src/mongo/db/repl/rs_rollback_test.cpp | 633
-rw-r--r-- src/mongo/db/repl/split_horizon_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/storage_interface_impl.cpp | 42
-rw-r--r-- src/mongo/db/repl/storage_interface_impl_test.cpp | 42
-rw-r--r-- src/mongo/db/repl/storage_interface_mock.h | 9
-rw-r--r-- src/mongo/db/repl/sync_source_resolver.cpp | 22
-rw-r--r-- src/mongo/db/repl/sync_source_selector.h | 2
-rw-r--r-- src/mongo/db/repl/sync_tail.cpp | 34
-rw-r--r-- src/mongo/db/repl/sync_tail_test.cpp | 170
-rw-r--r-- src/mongo/db/repl/task_runner.cpp | 1
-rw-r--r-- src/mongo/db/repl/topology_coordinator.cpp | 43
-rw-r--r-- src/mongo/db/repl/topology_coordinator.h | 2
-rw-r--r-- src/mongo/db/repl/topology_coordinator_v1_test.cpp | 1200
-rw-r--r-- src/mongo/db/repl/vote_requester_test.cpp | 90
-rw-r--r-- src/mongo/db/repl_index_build_state.h | 4
-rw-r--r-- src/mongo/db/s/active_migrations_registry.cpp | 9
-rw-r--r-- src/mongo/db/s/active_move_primaries_registry.cpp | 4
-rw-r--r-- src/mongo/db/s/active_move_primaries_registry.h | 2
-rw-r--r-- src/mongo/db/s/active_move_primaries_registry_test.cpp | 2
-rw-r--r-- src/mongo/db/s/active_shard_collection_registry.cpp | 6
-rw-r--r-- src/mongo/db/s/add_shard_util.cpp | 2
-rw-r--r-- src/mongo/db/s/add_shard_util.h | 2
-rw-r--r-- src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 12
-rw-r--r-- src/mongo/db/s/balancer/balancer_policy.cpp | 6
-rw-r--r-- src/mongo/db/s/balancer/migration_manager.cpp | 5
-rw-r--r-- src/mongo/db/s/balancer/migration_manager_test.cpp | 16
-rw-r--r-- src/mongo/db/s/balancer/scoped_migration_request.cpp | 12
-rw-r--r-- src/mongo/db/s/check_sharding_index_command.cpp | 9
-rw-r--r-- src/mongo/db/s/chunk_splitter.cpp | 10
-rw-r--r-- src/mongo/db/s/cleanup_orphaned_cmd.cpp | 6
-rw-r--r-- src/mongo/db/s/collection_metadata.cpp | 3
-rw-r--r-- src/mongo/db/s/collection_metadata_filtering_test.cpp | 9
-rw-r--r-- src/mongo/db/s/collection_metadata_test.cpp | 3
-rw-r--r-- src/mongo/db/s/collection_range_deleter.cpp | 18
-rw-r--r-- src/mongo/db/s/collection_range_deleter.h | 16
-rw-r--r-- src/mongo/db/s/collection_sharding_runtime.cpp | 3
-rw-r--r-- src/mongo/db/s/collection_sharding_state_test.cpp | 23
-rw-r--r-- src/mongo/db/s/config/configsvr_enable_sharding_command.cpp | 2
-rw-r--r-- src/mongo/db/s/config/configsvr_move_primary_command.cpp | 7
-rw-r--r-- src/mongo/db/s/config/configsvr_remove_shard_command.cpp | 4
-rw-r--r-- src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 32
-rw-r--r-- src/mongo/db/s/config/initial_split_policy.cpp | 12
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager.cpp | 3
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 122
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 38
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 11
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp | 8
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp | 16
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 50
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp | 8
-rw-r--r-- src/mongo/db/s/config_server_op_observer_test.cpp | 2
-rw-r--r-- src/mongo/db/s/flush_database_cache_updates_command.cpp | 3
-rw-r--r-- src/mongo/db/s/merge_chunks_command.cpp | 45
-rw-r--r-- src/mongo/db/s/metadata_manager.cpp | 12
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 23
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 8
-rw-r--r-- src/mongo/db/s/migration_destination_manager.cpp | 34
-rw-r--r-- src/mongo/db/s/migration_session_id.cpp | 4
-rw-r--r-- src/mongo/db/s/migration_source_manager.cpp | 44
-rw-r--r-- src/mongo/db/s/migration_util.cpp | 2
-rw-r--r-- src/mongo/db/s/migration_util.h | 2
-rw-r--r-- src/mongo/db/s/move_chunk_command.cpp | 4
-rw-r--r-- src/mongo/db/s/move_primary_source_manager.cpp | 3
-rw-r--r-- src/mongo/db/s/scoped_operation_completion_sharding_actions.h | 2
-rw-r--r-- src/mongo/db/s/session_catalog_migration_destination.cpp | 38
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source.cpp | 8
-rw-r--r-- src/mongo/db/s/set_shard_version_command.cpp | 16
-rw-r--r-- src/mongo/db/s/shard_metadata_util_test.cpp | 7
-rw-r--r-- src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 71
-rw-r--r-- src/mongo/db/s/shard_server_op_observer.cpp | 5
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod.cpp | 60
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod_test.cpp | 45
-rw-r--r-- src/mongo/db/s/sharding_logging.cpp | 14
-rw-r--r-- src/mongo/db/s/shardsvr_shard_collection.cpp | 57
-rw-r--r-- src/mongo/db/s/split_chunk.cpp | 18
-rw-r--r-- src/mongo/db/s/transaction_coordinator.cpp | 7
-rw-r--r-- src/mongo/db/s/transaction_coordinator_catalog.cpp | 4
-rw-r--r-- src/mongo/db/s/transaction_coordinator_futures_util.cpp | 10
-rw-r--r-- src/mongo/db/s/transaction_coordinator_futures_util.h | 43
-rw-r--r-- src/mongo/db/s/transaction_coordinator_futures_util_test.cpp | 6
-rw-r--r-- src/mongo/db/s/transaction_coordinator_service.cpp | 2
-rw-r--r-- src/mongo/db/s/transaction_coordinator_structures_test.cpp | 3
-rw-r--r-- src/mongo/db/s/transaction_coordinator_test.cpp | 65
-rw-r--r-- src/mongo/db/s/transaction_coordinator_util.cpp | 45
-rw-r--r-- src/mongo/db/s/txn_two_phase_commit_cmds.cpp | 11
-rw-r--r-- src/mongo/db/s/type_shard_identity_test.cpp | 21
-rw-r--r-- src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp | 4
-rw-r--r-- src/mongo/db/server_options.h | 18
-rw-r--r-- src/mongo/db/server_options_helpers.h | 28
-rw-r--r-- src/mongo/db/service_context_test_fixture.h | 6
-rw-r--r-- src/mongo/db/service_entry_point_common.cpp | 37
-rw-r--r-- src/mongo/db/session_catalog_mongod.cpp | 14
-rw-r--r-- src/mongo/db/session_catalog_test.cpp | 42
-rw-r--r-- src/mongo/db/sessions_collection_config_server.h | 18
-rw-r--r-- src/mongo/db/sorter/sorter.cpp | 40
-rw-r--r-- src/mongo/db/sorter/sorter.h | 2
-rw-r--r-- src/mongo/db/startup_warnings_common.cpp | 6
-rw-r--r-- src/mongo/db/startup_warnings_mongod.cpp | 14
-rw-r--r-- src/mongo/db/stats/counters.cpp | 2
-rw-r--r-- src/mongo/db/stats/counters.h | 2
-rw-r--r-- src/mongo/db/stats/fine_clock.h | 2
-rw-r--r-- src/mongo/db/stats/timer_stats.cpp | 2
-rw-r--r-- src/mongo/db/stats/timer_stats.h | 2
-rw-r--r-- src/mongo/db/storage/biggie/biggie_record_store.cpp | 5
-rw-r--r-- src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp | 8
-rw-r--r-- src/mongo/db/storage/biggie/store.h | 8
-rw-r--r-- src/mongo/db/storage/biggie/store_test.cpp | 4
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.cpp | 2
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.h | 2
-rw-r--r-- src/mongo/db/storage/capped_callback.h | 2
-rw-r--r-- src/mongo/db/storage/devnull/devnull_kv_engine.h | 2
-rw-r--r-- src/mongo/db/storage/durable_catalog_impl.cpp | 4
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp | 2
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h | 2
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp | 6
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp | 2
-rw-r--r-- src/mongo/db/storage/journal_listener.h | 2
-rw-r--r-- src/mongo/db/storage/key_string.cpp | 20
-rw-r--r-- src/mongo/db/storage/key_string_test.cpp | 7
-rw-r--r-- src/mongo/db/storage/kv/durable_catalog_test.cpp | 3
-rw-r--r-- src/mongo/db/storage/kv/kv_engine.h | 2
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 9
-rw-r--r-- src/mongo/db/storage/kv/kv_prefix.cpp | 2
-rw-r--r-- src/mongo/db/storage/kv/kv_prefix.h | 2
-rw-r--r-- src/mongo/db/storage/kv/temporary_kv_record_store.h | 3
-rw-r--r-- src/mongo/db/storage/mobile/mobile_session_pool.h | 4
-rw-r--r-- src/mongo/db/storage/record_store.h | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_harness.cpp | 4
-rw-r--r-- src/mongo/db/storage/record_store_test_randomiter.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_recorditer.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_recordstore.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_repairiter.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_storagesize.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_touch.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_truncate.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_updaterecord.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_updatewithdamages.cpp | 2
-rw-r--r-- src/mongo/db/storage/remove_saver.cpp | 2
-rw-r--r-- src/mongo/db/storage/snapshot.h | 2
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp | 96
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp | 45
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp | 23
-rw-r--r-- src/mongo/db/storage/storage_engine.h | 4
-rw-r--r-- src/mongo/db/storage/storage_engine_impl.cpp | 27
-rw-r--r-- src/mongo/db/storage/storage_engine_init.cpp | 21
-rw-r--r-- src/mongo/db/storage/storage_engine_interface.h | 2
-rw-r--r-- src/mongo/db/storage/storage_engine_lock_file_posix.cpp | 34
-rw-r--r-- src/mongo/db/storage/storage_engine_lock_file_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/storage_engine_lock_file_windows.cpp | 12
-rw-r--r-- src/mongo/db/storage/storage_engine_metadata.cpp | 66
-rw-r--r-- src/mongo/db/storage/storage_engine_metadata_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/storage_file_util.cpp | 12
-rw-r--r-- src/mongo/db/storage/storage_init.cpp | 13
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp | 15
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp | 12
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 3
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp | 10
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp | 8
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 14
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 31
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp | 8
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp | 38
-rw-r--r-- src/mongo/db/system_index.cpp | 16
-rw-r--r-- src/mongo/db/traffic_reader.cpp | 4
-rw-r--r-- src/mongo/db/traffic_recorder.cpp | 2
-rw-r--r-- src/mongo/db/traffic_recorder_validators.cpp | 4
-rw-r--r-- src/mongo/db/transaction_history_iterator.cpp | 3
-rw-r--r-- src/mongo/db/transaction_participant.cpp | 104
-rw-r--r-- src/mongo/db/transaction_participant_test.cpp | 187
-rw-r--r-- src/mongo/db/update/addtoset_node.cpp | 3
-rw-r--r-- src/mongo/db/update/addtoset_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/arithmetic_node.cpp | 16
-rw-r--r-- src/mongo/db/update/arithmetic_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/bit_node.cpp | 15
-rw-r--r-- src/mongo/db/update/bit_node.h | 2
-rw-r--r-- src/mongo/db/update/bit_node_test.cpp | 4
-rw-r--r-- src/mongo/db/update/compare_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/current_date_node_test.cpp | 4
-rw-r--r-- src/mongo/db/update/field_checker_test.cpp | 4
-rw-r--r-- src/mongo/db/update/log_builder.cpp | 16
-rw-r--r-- src/mongo/db/update/modifier_node.cpp | 18
-rw-r--r-- src/mongo/db/update/object_replace_executor.cpp | 3
-rw-r--r-- src/mongo/db/update/object_replace_executor_test.cpp | 2
-rw-r--r-- src/mongo/db/update/path_support.cpp | 18
-rw-r--r-- src/mongo/db/update/path_support_test.cpp | 20
-rw-r--r-- src/mongo/db/update/pipeline_executor_test.cpp | 2
-rw-r--r-- src/mongo/db/update/pop_node.cpp | 3
-rw-r--r-- src/mongo/db/update/pull_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/pullall_node.cpp | 2
-rw-r--r-- src/mongo/db/update/pullall_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/push_node.cpp | 6
-rw-r--r-- src/mongo/db/update/push_node_test.cpp | 15
-rw-r--r-- src/mongo/db/update/rename_node.cpp | 16
-rw-r--r-- src/mongo/db/update/rename_node_test.cpp | 5
-rw-r--r-- src/mongo/db/update/set_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/storage_validation.cpp | 3
-rw-r--r-- src/mongo/db/update/unset_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/update_array_node.h | 2
-rw-r--r-- src/mongo/db/update/update_driver.cpp | 16
-rw-r--r-- src/mongo/db/update/update_leaf_node.cpp | 10
-rw-r--r-- src/mongo/db/update/update_object_node.cpp | 33
-rw-r--r-- src/mongo/db/update/update_object_node.h | 2
-rw-r--r-- src/mongo/db/update/update_serialization_test.cpp | 2
-rw-r--r-- src/mongo/db/update_index_data.cpp | 2
-rw-r--r-- src/mongo/db/update_index_data.h | 2
-rw-r--r-- src/mongo/db/update_index_data_test.cpp | 2
-rw-r--r-- src/mongo/db/views/durable_view_catalog.cpp | 4
-rw-r--r-- src/mongo/db/views/resolved_view_test.cpp | 27
-rw-r--r-- src/mongo/db/views/view_catalog.cpp | 3
-rw-r--r-- src/mongo/db/views/view_catalog_test.cpp | 3
-rw-r--r-- src/mongo/db/views/view_graph.cpp | 6
-rw-r--r-- src/mongo/db/write_concern.cpp | 2
741 files changed, 7181 insertions(+), 12838 deletions(-)
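
Every hunk below is the same mechanical change: the tree was re-run through a newer clang-format, which packs chained stream-insertion ("<<") operands onto as few lines as possible instead of breaking after each operand, and collapses short lambdas onto a single line. The sketch that follows is illustrative only and is not part of the commit; it uses std::ostringstream in place of MongoDB's str::stream() helper so it compiles on its own.

#include <iostream>
#include <sstream>
#include <string>

// Builds the kind of diagnostic message seen in the hunks below.
std::string describeMismatch(const std::string& docName, const std::string& provided) {
    std::ostringstream ss;
    // Old clang-format broke the chain after every "<<" operand once it
    // wrapped; the new formatter emits the packed layout shown here. Both
    // layouts compile to identical code, which is why the commit deletes
    // more lines than it inserts.
    ss << "User name from privilege document \"" << docName
       << "\" doesn't match name of provided User \"" << provided << "\"";
    return ss.str();
}

int main() {
    // The other recurring reflow: a short lambda collapsed onto one line,
    // e.g. "auto nobodyIsLoggedIn = [] { return false; };" in the session hunk.
    auto quote = [](const std::string& s) { return "\"" + s + "\""; };
    std::cout << describeMismatch("userName", "spencer") << '\n'
              << quote("packed") << '\n';
    return 0;
}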
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 3342212c020..76c52b67947 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -432,8 +432,7 @@ Status AuthorizationManagerImpl::_initializeUserFromPrivilegeDocument(User* user
return Status(ErrorCodes::BadValue,
str::stream() << "User name from privilege document \"" << userName
<< "\" doesn't match name of provided User \""
- << user->getName().getUser()
- << "\"");
+ << user->getName().getUser() << "\"");
}
user->setID(parser.extractUserIDFromUserDocument(privDoc));
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index f51437f697a..9edef97e315 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -126,9 +126,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2read"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -141,9 +139,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2cluster"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "clusterAdmin"
<< "db"
@@ -248,19 +244,17 @@ public:
private:
Status _getUserDocument(OperationContext* opCtx, const UserName& userName, BSONObj* userDoc) {
- Status status = findOne(opCtx,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB()),
- userDoc);
+ Status status =
+ findOne(opCtx,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
status = Status(ErrorCodes::UserNotFound,
str::stream() << "Could not find user \"" << userName.getUser()
- << "\" for db \""
- << userName.getDB()
- << "\"");
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
@@ -296,9 +290,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
<< "myUser"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "myRole"
<< "db"
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index fd186c06d7d..c308e3f5304 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -493,8 +493,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToGrantPrivilege(const Privilege
ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::grantRole)) {
@@ -514,8 +513,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToRevokePrivilege(const Privileg
ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::revokeRole)) {
@@ -1001,9 +999,7 @@ bool AuthorizationSessionImpl::isImpersonating() const {
auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
OperationContext* const opCtx, const boost::optional<LogicalSessionId> cursorSessionId)
-> Status {
- auto nobodyIsLoggedIn = [authSession = this] {
- return !authSession->isAuthenticated();
- };
+ auto nobodyIsLoggedIn = [authSession = this] { return !authSession->isAuthenticated(); };
auto authHasImpersonatePrivilege = [authSession = this] {
return authSession->isAuthorizedForPrivilege(
@@ -1037,13 +1033,12 @@ auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
// Operation Context (which implies a background job
!authHasImpersonatePrivilege() // Or if the user has an impersonation privilege, in which
// case, the user gets to sidestep certain checks.
- ) {
+ ) {
return Status{ErrorCodes::Unauthorized,
- str::stream() << "Cursor session id ("
- << sessionIdToStringOrNone(cursorSessionId)
- << ") is not the same as the operation context's session id ("
- << sessionIdToStringOrNone(opCtx->getLogicalSessionId())
- << ")"};
+ str::stream()
+ << "Cursor session id (" << sessionIdToStringOrNone(cursorSessionId)
+ << ") is not the same as the operation context's session id ("
+ << sessionIdToStringOrNone(opCtx->getLogicalSessionId()) << ")"};
}
return Status::OK();
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index b0ec73151e4..b7e589d0f9b 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -178,9 +178,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -206,9 +204,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "admin"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -252,9 +248,7 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -284,9 +278,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rw"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -301,9 +293,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradmin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdmin"
<< "db"
@@ -315,9 +305,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rwany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -333,9 +321,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradminany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdminAnyDatabase"
<< "db"
@@ -412,9 +398,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -444,9 +428,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -489,9 +471,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -522,9 +502,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -558,9 +536,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -568,8 +544,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("192.168.0.0/24"
<< "192.168.2.10")
- << "serverAddress"
- << BSON_ARRAY("192.168.0.2"))
+ << "serverAddress" << BSON_ARRAY("192.168.0.2"))
<< BSON("clientSource" << BSON_ARRAY("2001:DB8::1") << "serverAddress"
<< BSON_ARRAY("2001:DB8::2"))
<< BSON("clientSource" << BSON_ARRAY("127.0.0.1"
@@ -911,11 +886,9 @@ TEST_F(AuthorizationSessionTest, CanAggregateOutWithInsertAndRemoveOnTargetNames
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
- BSONObj cmdObjNoBypassDocumentValidation = BSON(
- "aggregate" << testFooNss.coll() << "pipeline" << pipeline << "bypassDocumentValidation"
- << false
- << "cursor"
- << BSONObj());
+ BSONObj cmdObjNoBypassDocumentValidation =
+ BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline
+ << "bypassDocumentValidation" << false << "cursor" << BSONObj());
privileges = uassertStatusOK(authzSession->getPrivilegesForAggregate(
testFooNss, cmdObjNoBypassDocumentValidation, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -928,10 +901,8 @@ TEST_F(AuthorizationSessionTest,
Privilege(testBarCollResource, {ActionType::insert, ActionType::remove})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -946,10 +917,8 @@ TEST_F(AuthorizationSessionTest,
{ActionType::insert, ActionType::remove, ActionType::bypassDocumentValidation})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, true));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -1144,9 +1113,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithEmptyUser
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1163,9 +1130,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1180,9 +1145,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1190,9 +1153,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1210,9 +1171,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1220,9 +1179,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1241,9 +1198,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1251,9 +1206,7 @@ TEST_F(AuthorizationSessionTest,
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index e3184bef814..c5bf63894f5 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -89,11 +89,8 @@ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationCo
str::stream()
<< "Could not determine schema version of authorization data. "
"Bad (non-numeric) type "
- << typeName(versionElement.type())
- << " ("
- << versionElement.type()
- << ") for "
- << AuthorizationManager::schemaVersionFieldName
+ << typeName(versionElement.type()) << " (" << versionElement.type()
+ << ") for " << AuthorizationManager::schemaVersionFieldName
<< " field in version document");
}
} else if (status == ErrorCodes::NoMatchingDocument) {
@@ -132,8 +129,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
@@ -179,11 +175,8 @@ Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* opCt
userRoles << BSON("role" << role.getRole() << "db" << role.getDB());
}
*result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << userRoles.arr());
+ << userName.getDB() << "credentials" << BSON("external" << true)
+ << "roles" << userRoles.arr());
}
BSONElement directRolesElement;
@@ -285,17 +278,14 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* opCtx,
Status status = findOne(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
- status =
- Status(ErrorCodes::UserNotFound,
- str::stream() << "Could not find user \"" << userName.getUser() << "\" for db \""
- << userName.getDB()
- << "\"");
+ status = Status(ErrorCodes::UserNotFound,
+ str::stream() << "Could not find user \"" << userName.getUser()
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
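
The error paths above all build their reasons with str::stream(), an expression-level string builder that converts implicitly to std::string; because the whole chain is one expression, clang-format can repack it across lines without changing behavior. A self-contained approximation of the call shape (the real class lives in mongo/util/str.h; this sketch is only a stand-in):

    #include <iostream>
    #include <sstream>
    #include <string>

    namespace str {
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& v) {
            _ss << v;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };
    }  // namespace str

    int main() {
        std::string user = "spencer", db = "test";
        std::string msg = str::stream() << "Could not find user \"" << user
                                        << "\" for db \"" << db << "\"";
        std::cout << msg << '\n';
    }
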
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index acaf8389712..6e365f1a7b4 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -83,8 +83,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index fdb23453592..8969faa3a60 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -128,12 +128,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()))
- << "showPrivileges"
- << true
- << "showCredentials"
- << true
- << "showAuthenticationRestrictions"
- << true);
+ << "showPrivileges" << true << "showCredentials" << true
+ << "showAuthenticationRestrictions" << true);
BSONObjBuilder builder;
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
opCtx, "admin", usersInfoCmd, &builder);
@@ -150,10 +146,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
if (foundUsers.size() > 1) {
return Status(ErrorCodes::UserDataInconsistent,
- str::stream() << "Found multiple users on the \"" << userName.getDB()
- << "\" database with name \""
- << userName.getUser()
- << "\"");
+ str::stream()
+ << "Found multiple users on the \"" << userName.getDB()
+ << "\" database with name \"" << userName.getUser() << "\"");
}
*result = foundUsers[0].Obj().getOwned();
return Status::OK();
@@ -163,10 +158,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
BSONArrayBuilder userRolesBuilder;
auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
for (const RoleName& role : sslPeerInfo.roles) {
- userRolesBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()));
+ userRolesBuilder.append(BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
}
BSONArray providedRoles = userRolesBuilder.arr();
@@ -195,16 +189,12 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
"Recieved malformed response to request for X509 roles from config server");
}
- *result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray(cmdResult["roles"].Obj())
- << "inheritedRoles"
- << BSONArray(cmdResult["inheritedRoles"].Obj())
- << "inheritedPrivileges"
- << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
+ *result =
+ BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
+ << userName.getDB() << "credentials" << BSON("external" << true) << "roles"
+ << BSONArray(cmdResult["roles"].Obj()) << "inheritedRoles"
+ << BSONArray(cmdResult["inheritedRoles"].Obj()) << "inheritedPrivileges"
+ << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
return Status::OK();
}
}
@@ -216,11 +206,11 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
AuthenticationRestrictionsFormat showRestrictions,
BSONObj* result) {
BSONObjBuilder rolesInfoCmd;
- rolesInfoCmd.append("rolesInfo",
- BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << roleName.getDB())));
+ rolesInfoCmd.append(
+ "rolesInfo",
+ BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB())));
addShowToBuilder(&rolesInfoCmd, showPrivileges, showRestrictions);
BSONObjBuilder builder;
@@ -239,9 +229,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
if (foundRoles.size() > 1) {
return Status(ErrorCodes::RoleDataInconsistent,
str::stream() << "Found multiple roles on the \"" << roleName.getDB()
- << "\" database with name \""
- << roleName.getRole()
- << "\"");
+ << "\" database with name \"" << roleName.getRole() << "\"");
}
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
@@ -256,8 +244,7 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(
for (const RoleName& roleName : roles) {
rolesInfoCmdArray << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB());
}
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 288760ffb0d..969360a6f51 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -56,24 +56,21 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have db without collection
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have collection without db
parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
@@ -82,8 +79,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -92,8 +88,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -116,8 +111,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -143,8 +137,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -171,8 +164,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -198,8 +190,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index 8093864dfe3..b05a29fff4d 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -167,8 +167,8 @@ Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role)
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant roles to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant roles to built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -193,8 +193,8 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -207,8 +207,9 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
_roleToMembers[role].erase(itToRm);
} else {
return Status(ErrorCodes::RolesNotRelated,
- str::stream() << recipient.getFullName() << " is not a member"
- " of "
+ str::stream() << recipient.getFullName()
+ << " is not a member"
+ " of "
<< role.getFullName());
}
@@ -227,8 +228,8 @@ Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
}
if (isBuiltinRole(victim)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << victim.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << victim.getFullName());
}
RoleNameVector& subordinatesOfVictim = _roleToSubordinates[victim];
@@ -253,8 +254,8 @@ Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& priv
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
_addPrivilegeToRoleNoChecks(role, privilegeToAdd);
@@ -277,8 +278,8 @@ Status RoleGraph::addPrivilegesToRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
for (PrivilegeVector::const_iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
@@ -296,8 +297,8 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
@@ -325,8 +326,9 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
}
return Status(ErrorCodes::PrivilegeNotFound,
- str::stream() << "Role: " << role.getFullName() << " does not "
- "contain any privileges on "
+ str::stream() << "Role: " << role.getFullName()
+ << " does not "
+ "contain any privileges on "
<< privilegeToRemove.getResourcePattern().toString());
}
@@ -350,8 +352,8 @@ Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
_directPrivilegesForRole[role].clear();
return Status::OK();
@@ -434,8 +436,8 @@ Status RoleGraph::_recomputePrivilegeDataHelper(const RoleName& startingRole,
if (!roleExists(currentRole)) {
return Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role: " << currentRole.getFullName()
- << " does not exist");
+ str::stream()
+ << "Role: " << currentRole.getFullName() << " does not exist");
}
// Check for cycles
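
Every hunk in this file touches the same guard-clause pattern: reject the modification up front and return a Status whose reason is streamed inline. A toy version of that shape, with an illustrative Status and ErrorCodes rather than the real types:

    #include <iostream>
    #include <string>

    enum class ErrorCodes { OK, InvalidRoleModification };

    struct Status {
        ErrorCodes code;
        std::string reason;
        bool isOK() const {
            return code == ErrorCodes::OK;
        }
    };

    Status addRoleToRole(bool recipientIsBuiltin, const std::string& roleFullName) {
        if (recipientIsBuiltin) {
            // Guard clause: fail fast with a descriptive reason.
            return {ErrorCodes::InvalidRoleModification,
                    "Cannot grant roles to built-in role: " + roleFullName};
        }
        // ... mutate the role graph here ...
        return {ErrorCodes::OK, {}};
    }

    int main() {
        Status s = addRoleToRole(true, "admin.read");
        if (!s.isOK())
            std::cout << s.reason << '\n';
    }
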
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index a2ed3dece7f..765d3d3c61c 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -196,7 +196,7 @@ TEST(RoleGraphTest, AddRemoveRoles) {
* |
* v
* D
- */
+ */
it = graph.getDirectSubordinates(roleA); // should be roleB and roleC, order doesn't matter
@@ -825,26 +825,22 @@ TEST(RoleGraphTest, AddRoleFromDocument) {
<< "dbA"
<< "collection"
<< "collA")
- << "actions"
- << BSON_ARRAY("insert"))),
+ << "actions" << BSON_ARRAY("insert"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< "dbB"
<< "collection"
<< "collB")
- << "actions"
- << BSON_ARRAY("insert"))
+ << "actions" << BSON_ARRAY("insert"))
<< BSON("resource" << BSON("db"
<< "dbC"
<< "collection"
<< "collC")
- << "actions"
- << BSON_ARRAY("compact"))),
+ << "actions" << BSON_ARRAY("compact"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find"))),
+ << "actions" << BSON_ARRAY("find"))),
};
const BSONArray restrictions[] = {
@@ -922,33 +918,28 @@ TEST(RoleGraphTest, AddRoleFromDocumentWithRestricitonMerge) {
BSON_ARRAY(BSON("serverAddress" << BSON_ARRAY("127.0.0.1/8")));
RoleGraph graph;
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbA.roleA"
- << "role"
- << "roleA"
- << "db"
- << "dbA"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSONArray()
- << "authenticationRestrictions"
- << roleARestrictions)));
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbB.roleB"
- << "role"
- << "roleB"
- << "db"
- << "dbB"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSON_ARRAY(BSON("role"
- << "roleA"
- << "db"
- << "dbA"))
- << "authenticationRestrictions"
- << roleBRestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbA.roleA"
+ << "role"
+ << "roleA"
+ << "db"
+ << "dbA"
+ << "privileges" << BSONArray() << "roles" << BSONArray()
+ << "authenticationRestrictions" << roleARestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbB.roleB"
+ << "role"
+ << "roleB"
+ << "db"
+ << "dbB"
+ << "privileges" << BSONArray() << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))
+ << "authenticationRestrictions" << roleBRestrictions)));
ASSERT_OK(graph.recomputePrivilegeData());
const auto A = graph.getDirectAuthenticationRestrictions(RoleName("roleA", "dbA"));
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 02c89f36bd7..33ee260fa93 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -92,9 +92,7 @@ Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& role
return Status(ErrorCodes::FailedToParse,
str::stream() << "Role document _id fields must be encoded as the string "
"dbname.rolename. Found "
- << idField
- << " for "
- << roleName.getFullName());
+ << idField << " for " << roleName.getFullName());
}
return Status::OK();
}
@@ -312,16 +310,13 @@ Status handleOplogCommand(RoleGraph* roleGraph, const BSONObj& cmdObj) {
if (cmdName == "createIndexes" &&
cmdObj.firstElement().str() == rolesCollectionNamespace.coll()) {
UnorderedFieldsBSONObjComparator instance;
- if (instance.evaluate(cmdObj == (BSON("createIndexes"
- << "system.roles"
- << "v"
- << 2
- << "name"
- << "role_1_db_1"
- << "key"
- << BSON("role" << 1 << "db" << 1)
- << "unique"
- << true)))) {
+ if (instance.evaluate(
+ cmdObj ==
+ (BSON("createIndexes"
+ << "system.roles"
+ << "v" << 2 << "name"
+ << "role_1_db_1"
+ << "key" << BSON("role" << 1 << "db" << 1) << "unique" << true)))) {
return Status::OK();
}
}
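
The rewrapped equality check above goes through UnorderedFieldsBSONObjComparator, which matches the createIndexes oplog entry regardless of field order. A stand-in demonstration of order-insensitive document comparison using std::map, which compares by key so insertion order cannot matter:

    #include <iostream>
    #include <map>
    #include <string>

    using Doc = std::map<std::string, std::string>;

    int main() {
        Doc expected{{"createIndexes", "system.roles"},
                     {"name", "role_1_db_1"},
                     {"unique", "true"}};
        Doc received{{"unique", "true"},
                     {"createIndexes", "system.roles"},
                     {"name", "role_1_db_1"}};
        // Listed in a different order at the call site, yet equal.
        std::cout << (expected == received ? "match" : "mismatch") << '\n';
    }
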
diff --git a/src/mongo/db/auth/sasl_authentication_session_test.cpp b/src/mongo/db/auth/sasl_authentication_session_test.cpp
index 97750182061..e849832d6ff 100644
--- a/src/mongo/db/auth/sasl_authentication_session_test.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session_test.cpp
@@ -131,19 +131,17 @@ SaslConversation::SaslConversation(std::string mech)
<< scram::Secrets<SHA256Block>::generateCredentials(
"frim", saslGlobalParams.scramSHA256IterationCount.load()));
- ASSERT_OK(authManagerExternalState->insert(opCtx.get(),
- NamespaceString("admin.system.users"),
- BSON("_id"
- << "test.andy"
- << "user"
- << "andy"
- << "db"
- << "test"
- << "credentials"
- << creds
- << "roles"
- << BSONArray()),
- BSONObj()));
+ ASSERT_OK(
+ authManagerExternalState->insert(opCtx.get(),
+ NamespaceString("admin.system.users"),
+ BSON("_id"
+ << "test.andy"
+ << "user"
+ << "andy"
+ << "db"
+ << "test"
+ << "credentials" << creds << "roles" << BSONArray()),
+ BSONObj()));
}
void SaslConversation::assertConversationFailure() {
diff --git a/src/mongo/db/auth/sasl_mechanism_registry.cpp b/src/mongo/db/auth/sasl_mechanism_registry.cpp
index 2de9fb02fee..bfe479143d3 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry.cpp
@@ -79,8 +79,7 @@ StatusWith<std::unique_ptr<ServerMechanismBase>> SASLServerMechanismRegistry::ge
return Status(ErrorCodes::BadValue,
str::stream() << "Unsupported mechanism '" << mechanismName
- << "' on authentication database '"
- << authenticationDatabase
+ << "' on authentication database '" << authenticationDatabase
<< "'");
}
@@ -147,9 +146,7 @@ bool SASLServerMechanismRegistry::_mechanismSupportedByConfig(StringData mechNam
namespace {
ServiceContext::ConstructorActionRegisterer SASLServerMechanismRegistryInitializer{
- "CreateSASLServerMechanismRegistry",
- {"EndStartupOptionStorage"},
- [](ServiceContext* service) {
+ "CreateSASLServerMechanismRegistry", {"EndStartupOptionStorage"}, [](ServiceContext* service) {
SASLServerMechanismRegistry::set(service,
std::make_unique<SASLServerMechanismRegistry>(
saslGlobalParams.authenticationMechanisms));
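
This hunk shows the new treatment of a trailing lambda argument: the registerer's name, prerequisite list, and callback stay on one line when they fit. A hypothetical miniature of the registerer idiom itself; the names are illustrative, and the real ConstructorActionRegisterer defers the action until ServiceContext construction rather than running it immediately:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Registerer {
        Registerer(std::string name,
                   std::vector<std::string> prereqs,
                   std::function<void()> action) {
            std::cout << "registered " << name << " after";
            for (const auto& p : prereqs)
                std::cout << ' ' << p;
            std::cout << '\n';
            action();  // a real registry would store this for later
        }
    };

    static Registerer createRegistry{
        "CreateSASLServerMechanismRegistry", {"EndStartupOptionStorage"}, [] {
            std::cout << "registry constructed\n";
        }};

    int main() {}
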
diff --git a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
index 6ca988bc9ae..b16df4ec3f8 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
@@ -27,11 +27,11 @@
* it in the license file.
*/
-#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/crypto/mechanism_scram.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_impl.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/unittest/unittest.h"
@@ -201,8 +201,7 @@ public:
<< "credentials"
<< BSON("SCRAM-SHA-256"
<< scram::Secrets<SHA256Block>::generateCredentials("sajack‍", 15000))
- << "roles"
- << BSONArray()),
+ << "roles" << BSONArray()),
BSONObj()));
@@ -214,10 +213,8 @@ public:
<< "sajack"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray()),
+ << "credentials" << BSON("external" << true)
+ << "roles" << BSONArray()),
BSONObj()));
internalSecurity.user = std::make_shared<User>(UserName("__system", "local"));
diff --git a/src/mongo/db/auth/sasl_options_init.cpp b/src/mongo/db/auth/sasl_options_init.cpp
index b83a94fa1c0..51ba683342b 100644
--- a/src/mongo/db/auth/sasl_options_init.cpp
+++ b/src/mongo/db/auth/sasl_options_init.cpp
@@ -95,4 +95,4 @@ MONGO_INITIALIZER_GENERAL(StoreSASLOptions, ("CoreOptions_Store"), ("EndStartupO
(InitializerContext* const context) {
return storeSASLOptions(moe::startupOptionsParsed);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.cpp b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
index 5ef2cf6ac9e..0a88084dea3 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
@@ -60,8 +60,9 @@ StatusWith<bool> trySCRAM(const User::CredentialData& credentials, StringData pw
reinterpret_cast<const std::uint8_t*>(decodedSalt.c_str()) +
decodedSalt.size()),
scram.iterationCount));
- if (scram.storedKey != base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
- secrets.storedKey().size())) {
+ if (scram.storedKey !=
+ base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
+ secrets.storedKey().size())) {
return Status(ErrorCodes::AuthenticationFailed,
str::stream() << "Incorrect user name or password");
}
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.h b/src/mongo/db/auth/sasl_plain_server_conversation.h
index 26acd1e0aac..d3c6af215ce 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.h
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.h
@@ -49,8 +49,9 @@ public:
static constexpr bool isInternal = true;
bool canMakeMechanismForUser(const User* user) const final {
auto credentials = user->getCredentials();
- return !credentials.isExternal && (credentials.scram<SHA1Block>().isValid() ||
- credentials.scram<SHA256Block>().isValid());
+ return !credentials.isExternal &&
+ (credentials.scram<SHA1Block>().isValid() ||
+ credentials.scram<SHA256Block>().isValid());
}
};
diff --git a/src/mongo/db/auth/sasl_scram_server_conversation.cpp b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
index 04a8e53798a..fc223097b4f 100644
--- a/src/mongo/db/auth/sasl_scram_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
@@ -99,8 +99,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for first SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -168,8 +167,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
if (!authzId.empty() && ServerMechanismBase::_principalName != authzId) {
return Status(ErrorCodes::BadValue,
str::stream() << "SCRAM user name " << ServerMechanismBase::_principalName
- << " does not match authzid "
- << authzId);
+ << " does not match authzid " << authzId);
}
if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
@@ -267,7 +265,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
* e=message
*
* NOTE: we are ignoring the channel binding part of the message
-**/
+ **/
template <typename Policy>
StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_secondStep(
OperationContext* opCtx, StringData inputData) {
@@ -275,8 +273,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for second SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -322,9 +319,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Unmatched SCRAM nonce received from client in second step, expected "
- << _nonce
- << " but received "
- << nonce);
+ << _nonce << " but received " << nonce);
}
// Do server side computations, compare storedKeys and generate client-final-message
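
Both _firstStep and _secondStep open with the same argument-count guard before any parsing. An illustrative sketch of that check, splitting a client-first message on commas and requiring at least three fields; this is not the real parser, which also validates each attribute:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main() {
        std::string clientFirst = "n,,n=spencer,r=fyko+d2lbbFgONRv9qkxdawL";
        std::vector<std::string> input;
        std::istringstream ss(clientFirst);
        for (std::string field; std::getline(ss, field, ',');)
            input.push_back(field);

        if (input.size() < 3) {
            std::cout << "Incorrect number of arguments for first SCRAM "
                         "client message, got "
                      << input.size() << " expected at least 3\n";
            return 1;
        }
        std::cout << "fields: " << input.size() << '\n';
    }
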
diff --git a/src/mongo/db/auth/sasl_scram_test.cpp b/src/mongo/db/auth/sasl_scram_test.cpp
index b16d58b9288..2c9f5fc3acc 100644
--- a/src/mongo/db/auth/sasl_scram_test.cpp
+++ b/src/mongo/db/auth/sasl_scram_test.cpp
@@ -62,16 +62,10 @@ BSONObj generateSCRAMUserDocument(StringData username, StringData password) {
const auto sha256Cred =
scram::Secrets<SHA256Block>::generateCredentials(password.toString(), 15000);
return BSON("_id" << (str::stream() << database << "." << username).operator StringData()
- << AuthorizationManager::USER_NAME_FIELD_NAME
- << username
- << AuthorizationManager::USER_DB_FIELD_NAME
- << database
- << "credentials"
- << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred)
- << "roles"
- << BSONArray()
- << "privileges"
- << BSONArray());
+ << AuthorizationManager::USER_NAME_FIELD_NAME << username
+ << AuthorizationManager::USER_DB_FIELD_NAME << database << "credentials"
+ << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred) << "roles"
+ << BSONArray() << "privileges" << BSONArray());
}
std::string corruptEncodedPayload(const std::string& message,
@@ -302,7 +296,6 @@ TEST_F(SCRAMFixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
std::string::iterator nonceBegin = serverMessage.begin() + serverMessage.find("r=");
std::string::iterator nonceEnd = std::find(nonceBegin, serverMessage.end(), ',');
serverMessage = serverMessage.replace(nonceBegin, nonceEnd, "r=");
-
});
ASSERT_EQ(
SCRAMStepsResult(SaslTestState(SaslTestState::kClient, 2),
@@ -348,7 +341,6 @@ TEST_F(SCRAMFixture, testClientStep2GivesBadProof) {
std::string::iterator proofEnd = std::find(proofBegin, clientMessage.end(), ',');
clientMessage = clientMessage.replace(
proofBegin, proofEnd, corruptEncodedPayload(clientMessage, proofBegin, proofEnd));
-
});
ASSERT_EQ(SCRAMStepsResult(SaslTestState(SaslTestState::kServer, 2),
@@ -378,7 +370,6 @@ TEST_F(SCRAMFixture, testServerStep2GivesBadVerifier) {
encodedVerifier = corruptEncodedPayload(serverMessage, verifierBegin, verifierEnd);
serverMessage = serverMessage.replace(verifierBegin, verifierEnd, encodedVerifier);
-
});
auto result = runSteps(mutator);
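
The deleted blank lines all sit at the tail of test mutators that corrupt one field of a SCRAM message in place. The splicing technique itself is plain std::string::replace over an iterator range, for example:

    #include <algorithm>
    #include <iostream>
    #include <string>

    int main() {
        std::string serverMessage = "r=abc123,s=c2FsdA==,i=10000";
        auto nonceBegin = serverMessage.begin() + serverMessage.find("r=");
        auto nonceEnd = std::find(nonceBegin, serverMessage.end(), ',');
        serverMessage.replace(nonceBegin, nonceEnd, "r=");  // drop the nonce
        std::cout << serverMessage << '\n';  // r=,s=c2FsdA==,i=10000
    }
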
diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp
index 04efa479fbc..0dc4bfafe23 100644
--- a/src/mongo/db/auth/security_file.cpp
+++ b/src/mongo/db/auth/security_file.cpp
@@ -74,8 +74,8 @@ StatusWith<std::vector<std::string>> readSecurityFile(const std::string& filenam
// check obvious file errors
if (stat(filename.c_str(), &stats) == -1) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Error reading file " << filename << ": "
- << strerror(errno));
+ str::stream()
+ << "Error reading file " << filename << ": " << strerror(errno));
}
#if !defined(_WIN32)
diff --git a/src/mongo/db/auth/user.cpp b/src/mongo/db/auth/user.cpp
index ce869ea28f5..96d1251c316 100644
--- a/src/mongo/db/auth/user.cpp
+++ b/src/mongo/db/auth/user.cpp
@@ -160,7 +160,7 @@ void User::addPrivileges(const PrivilegeVector& privileges) {
}
}
-void User::setRestrictions(RestrictionDocuments restrictions)& {
+void User::setRestrictions(RestrictionDocuments restrictions) & {
_restrictions = std::move(restrictions);
}
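
The hunk above only inserts a space before the ref-qualifier, but the qualifier itself is easy to misread: the trailing '&' restricts the setter to lvalue objects, so it cannot be called on a temporary User. A minimal sketch:

    #include <string>
    #include <utility>

    class User {
    public:
        void setRestrictions(std::string restrictions) & {
            _restrictions = std::move(restrictions);
        }

    private:
        std::string _restrictions;
    };

    int main() {
        User u;
        u.setRestrictions("clientSource: 127.0.0.1/8");  // OK: u is an lvalue
        // User{}.setRestrictions("x");  // ill-formed: rvalue object
    }
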
diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp
index 1c5da7795be..8eb6dc7a94b 100644
--- a/src/mongo/db/auth/user_document_parser.cpp
+++ b/src/mongo/db/auth/user_document_parser.cpp
@@ -152,8 +152,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
StringData userDBStr = userDBElement.valueStringData();
if (!NamespaceString::validDBName(userDBStr, NamespaceString::DollarInDbNameBehavior::Allow) &&
userDBStr != "$external") {
- return _badValue(str::stream() << "'" << userDBStr
- << "' is not a valid value for the db field.");
+ return _badValue(str::stream()
+ << "'" << userDBStr << "' is not a valid value for the db field.");
}
// Validate the "credentials" element
@@ -184,8 +184,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
str::stream() << fieldName << " does not exist");
}
if (scramElement.type() != Object) {
- return _badValue(str::stream() << fieldName
- << " credential must be an object, if present");
+ return _badValue(str::stream()
+ << fieldName << " credential must be an object, if present");
}
return Status::OK();
};
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index af798f525a6..44721c6570d 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -83,23 +83,18 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "pwd"
<< "a"
- << "roles"
- << BSON_ARRAY("read"))));
+ << "roles" << BSON_ARRAY("read"))));
// Need name field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need source field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials field
@@ -107,16 +102,14 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "roles"
- << emptyArray)));
+ << "roles" << emptyArray)));
// Need roles field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials)));
+ << "credentials" << credentials)));
// authenticationRestricitons must be an array if it exists
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
@@ -131,11 +124,8 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
- << emptyArray
- << "authenticationRestrictions"
+ << "credentials" << credentials << "roles"
+ << emptyArray << "authenticationRestrictions"
<< emptyArray)));
// Empty roles arrays are OK
@@ -143,9 +133,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials of {external: true} if user's db is $external
@@ -153,19 +141,15 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << emptyArray)));
+ << "credentials" << BSON("external" << true)
+ << "roles" << emptyArray)));
// Roles must be objects
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY("read"))));
// Role needs name
@@ -173,9 +157,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("db"
<< "dbA")))));
@@ -184,9 +166,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA")))));
@@ -196,9 +176,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -209,9 +187,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -227,9 +203,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "authenticationRestrictions"
+ << "credentials" << credentials << "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("127.0.0.1/8") << "serverAddress"
<< BSON_ARRAY("127.0.0.1/8")))
<< "roles"
@@ -243,9 +217,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "extraData"
+ << "credentials" << credentials << "extraData"
<< BSON("foo"
<< "bar")
<< "roles"
@@ -318,13 +290,13 @@ TEST_F(V2UserDocumentParsing, V2CredentialExtraction) {
ASSERT(!user->getCredentials().isExternal);
// Make sure extracting valid combined credentials works
- ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "db"
- << "test"
- << "credentials"
- << credentials)));
+ ASSERT_OK(
+ v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << credentials)));
ASSERT(user->getCredentials().scram_sha1.isValid());
ASSERT(user->getCredentials().scram_sha256.isValid());
ASSERT(!user->getCredentials().isExternal);
@@ -350,18 +322,18 @@ TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
user.get()));
// V1-style roles arrays no longer work
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY("read")),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY("read")),
+ user.get()));
// Roles must have "db" field
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY(BSONObj())),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSONObj())),
+ user.get()));
ASSERT_NOT_OK(
v2parser.initializeUserRolesFromUserDocument(BSON("user"
@@ -428,16 +400,14 @@ TEST_F(V2UserDocumentParsing, V2AuthenticationRestrictionsExtraction) {
ASSERT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << emptyArray),
+ << "authenticationRestrictions" << emptyArray),
user.get()));
// authenticationRestrictions must have at least one of "clientSource"/"serverAdddress" fields
ASSERT_NOT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << BSON_ARRAY(emptyObj)),
+ << "authenticationRestrictions" << BSON_ARRAY(emptyObj)),
user.get()));
// authenticationRestrictions must not have unexpected elements
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 29f4bc53574..0d380888ac9 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -64,8 +64,9 @@ Status _checkNoExtraFields(const BSONObj& cmdObj,
StringData fieldName = (*iter).fieldNameStringData();
if (!isGenericArgument(fieldName) && !validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
- str::stream() << "\"" << fieldName << "\" is not "
- "a valid argument to "
+ str::stream() << "\"" << fieldName
+ << "\" is not "
+ "a valid argument to "
<< cmdName);
}
}
@@ -175,8 +176,9 @@ Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
if (!parsedRoleNames->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"roles\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"roles\" array");
}
return Status::OK();
}
@@ -634,8 +636,9 @@ Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
}
if (!parsedPrivileges->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"privileges\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"privileges\" array");
}
return Status::OK();
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index 29d973fe3e7..f648c3e13ed 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -80,7 +80,7 @@ public:
}
}
- _baton->schedule([ this, anchor = shared_from_this() ](Status status) {
+ _baton->schedule([this, anchor = shared_from_this()](Status status) {
_runJobs(stdx::unique_lock(_mutex), status);
});
}
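
Here clang-format drops the old spaces inside a lambda introducer that carries an init-capture. The capture is the keep-alive idiom: the scheduled callback holds shared ownership of its object so it can safely outlive the caller. A self-contained sketch with a stand-in Job type:

    #include <functional>
    #include <iostream>
    #include <memory>

    class Job : public std::enable_shared_from_this<Job> {
    public:
        std::function<void()> makeCallback() {
            // anchor keeps *this alive for as long as the callback exists.
            return [this, anchor = shared_from_this()] { _run(); };
        }

    private:
        void _run() {
            std::cout << "job ran\n";
        }
    };

    int main() {
        std::function<void()> cb;
        {
            auto job = std::make_shared<Job>();
            cb = job->makeCallback();
        }  // the last named reference to the Job goes away here
        cb();  // still safe: the capture owns the Job
    }
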
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d9481ded941..708f93ce0db 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -64,8 +64,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while truncating collection: "
- << collectionName);
+ str::stream()
+ << "Not primary while truncating collection: " << collectionName);
}
Database* db = autoDb.getDb();
@@ -91,8 +91,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
repl::ReplicationCoordinator::modeNone) &&
collectionName.isOplog()) {
return Status(ErrorCodes::OplogOperationUnsupported,
- str::stream() << "Cannot truncate a live oplog while replicating: "
- << collectionName);
+ str::stream()
+ << "Cannot truncate a live oplog while replicating: " << collectionName);
}
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
@@ -140,8 +140,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassert(ErrorCodes::NamespaceExists,
str::stream() << "cloneCollectionAsCapped failed - destination collection " << toNss
- << " already exists. source collection: "
- << fromNss,
+ << " already exists. source collection: " << fromNss,
!db->getCollection(opCtx, toNss));
// create new collection
@@ -269,8 +268,7 @@ void convertToCapped(OperationContext* opCtx,
uassertStatusOKWithContext(tmpNameResult,
str::stream()
<< "Cannot generate temporary collection namespace to convert "
- << collectionName
- << " to a capped collection");
+ << collectionName << " to a capped collection");
const auto& longTmpName = tmpNameResult.getValue();
const auto shortTmpName = longTmpName.coll().toString();
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 95c1381f91e..da12b2bb6b2 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -125,8 +125,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
fassert(40689,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto indexesToRebuild = indexSpecs.getValue();
invariant(
@@ -171,8 +170,8 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
// Note that the collection name already includes the database component.
auto collection = db->getCollection(opCtx, collNss);
invariant(collection,
- str::stream() << "failed to get valid collection pointer for namespace "
- << collNss);
+ str::stream()
+ << "failed to get valid collection pointer for namespace " << collNss);
auto uuid = collection->uuid();
invariant(uuid);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 569dbb1bd0a..f63c3f9deb8 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -134,8 +134,8 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
cmr.idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!cmr.idx) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << indexName << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << indexName << " for ns " << nss);
}
} else {
std::vector<const IndexDescriptor*> indexes;
@@ -145,17 +145,14 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
str::stream() << "index keyPattern " << keyPattern << " matches "
- << indexes.size()
- << " indexes,"
+ << indexes.size() << " indexes,"
<< " must use index name. "
- << "Conflicting indexes:"
- << indexes[0]->infoObj()
- << ", "
- << indexes[1]->infoObj());
+ << "Conflicting indexes:" << indexes[0]->infoObj()
+ << ", " << indexes[1]->infoObj());
} else if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << keyPattern << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << keyPattern << " for ns " << nss);
}
cmr.idx = indexes[0];
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 03f94006a7b..d42a94133e5 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -251,9 +251,8 @@ private:
mongo::stdx::unordered_map<CollectionUUID, NamespaceString, CollectionUUID::Hash>>
_shadowCatalog;
- using CollectionCatalogMap = mongo::stdx::unordered_map<CollectionUUID,
- std::unique_ptr<Collection>,
- CollectionUUID::Hash>;
+ using CollectionCatalogMap = mongo::stdx::
+ unordered_map<CollectionUUID, std::unique_ptr<Collection>, CollectionUUID::Hash>;
using OrderedCollectionMap = std::map<std::pair<std::string, CollectionUUID>, Collection*>;
using NamespaceCollectionMap = mongo::stdx::unordered_map<NamespaceString, Collection*>;
CollectionCatalogMap _catalog;
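
The alias being re-wrapped above is a three-argument unordered_map: a UUID key, an owning mapped type, and a hash functor nested in the key type. A standalone equivalent with stand-in types for CollectionUUID and Collection:

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct CollectionUUID {
        std::uint64_t bits;
        bool operator==(const CollectionUUID& o) const {
            return bits == o.bits;
        }
        struct Hash {
            std::size_t operator()(const CollectionUUID& u) const {
                return std::hash<std::uint64_t>{}(u.bits);
            }
        };
    };

    struct Collection {};

    using CollectionCatalogMap =
        std::unordered_map<CollectionUUID, std::unique_ptr<Collection>, CollectionUUID::Hash>;

    int main() {
        CollectionCatalogMap catalog;
        catalog[CollectionUUID{42}] = std::make_unique<Collection>();
        return catalog.size() == 1 ? 0 : 1;
    }
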
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 32bf6ab8047..c033dcfcada 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -121,7 +121,7 @@ public:
void checkCollections(std::string dbName) {
unsigned long counter = 0;
- for (auto[orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
+ for (auto [orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
catalogIt != catalog.end() && orderedIt != collsIteratorEnd(dbName);
++catalogIt, ++orderedIt) {
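
The only change here is the space clang-format now requires in auto [a, b]; the construct is a structured binding that unpacks a tuple of iterators in the for-init statement so two sequences advance in lockstep. A minimal example:

    #include <iostream>
    #include <tuple>
    #include <vector>

    int main() {
        std::vector<int> ordered{1, 2, 3};
        std::vector<int> catalog{1, 2, 3};
        for (auto [orderedIt, catalogIt] =
                 std::tuple{ordered.begin(), catalog.begin()};
             orderedIt != ordered.end() && catalogIt != catalog.end();
             ++orderedIt, ++catalogIt) {
            std::cout << (*orderedIt == *catalogIt ? "match " : "differ ");
        }
        std::cout << '\n';
    }
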
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index c3e886d27eb..867cc6fdf3b 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -93,9 +93,7 @@ StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
- << ": "
- << keyStatus.reason()
- << " For more info see"
+ << ": " << keyStatus.reason() << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
diff --git a/src/mongo/db/catalog/collection_compact.h b/src/mongo/db/catalog/collection_compact.h
index b17b0ec3886..c3fab5e37ba 100644
--- a/src/mongo/db/catalog/collection_compact.h
+++ b/src/mongo/db/catalog/collection_compact.h
@@ -36,9 +36,9 @@
namespace mongo {
/**
- * Compacts collection.
- * See record_store.h for CompactStats and CompactOptions definitions.
- */
+ * Compacts collection.
+ * See record_store.h for CompactStats and CompactOptions definitions.
+ */
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
Collection* collection,
const CompactOptions* options);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index a2aa0a917a4..1711a0a9242 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -330,10 +330,8 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collection " << ns().ns()
- << (_uuid ? " with UUID " + _uuid->toString() : "")
- << " in the "
- << ns().db()
- << " internal database"};
+ << (_uuid ? " with UUID " + _uuid->toString() : "") << " in the "
+ << ns().db() << " internal database"};
}
boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, _collator.get()));
@@ -423,8 +421,9 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
const auto firstIdElem = data["first_id"];
// If the failpoint specifies no collection or matches the existing one, hang.
if ((!collElem || _ns.ns() == collElem.str()) &&
- (!firstIdElem || (begin != end && firstIdElem.type() == mongo::String &&
- begin->doc["_id"].str() == firstIdElem.str()))) {
+ (!firstIdElem ||
+ (begin != end && firstIdElem.type() == mongo::String &&
+ begin->doc["_id"].str() == firstIdElem.str()))) {
string whenFirst =
firstIdElem ? (string(" when first _id is ") + firstIdElem.str()) : "";
while (MONGO_FAIL_POINT(hangAfterCollectionInserts)) {
@@ -680,9 +679,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
uasserted(ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
- << oldSize
- << " != "
- << newDoc.objsize());
+ << oldSize << " != " << newDoc.objsize());
args->preImageDoc = oldDoc.value().getOwned();
@@ -857,11 +854,9 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
DurableCatalog::get(opCtx)->updateValidator(
opCtx, ns(), validatorDoc, getValidationLevel(), getValidationAction());
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc)
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc)]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
});
@@ -937,13 +932,11 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
StringData newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc),
- oldValidationLevel = _validationLevel,
- oldValidationAction = _validationAction
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc),
+ oldValidationLevel = _validationLevel,
+ oldValidationAction = _validationAction]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
this->_validationLevel = oldValidationLevel;
@@ -1276,10 +1269,8 @@ void addErrorIfUnequal(T stored, T cached, StringData name, ValidateResults* res
if (stored != cached) {
results->valid = false;
results->errors.push_back(str::stream() << "stored value for " << name
- << " does not match cached value: "
- << stored
- << " != "
- << cached);
+ << " does not match cached value: " << stored
+ << " != " << cached);
}
}
@@ -1383,8 +1374,8 @@ Status CollectionImpl::validate(OperationContext* opCtx,
opCtx, _indexCatalog.get(), &indexNsResultsMap, &keysPerIndex, level, results, output);
if (!results->valid) {
- log(LogComponent::kIndex) << "validating collection " << ns() << " failed"
- << uuidString;
+ log(LogComponent::kIndex)
+ << "validating collection " << ns() << " failed" << uuidString;
} else {
log(LogComponent::kIndex) << "validated collection " << ns() << uuidString;
}
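
The onRollback lambdas repacked above move-capture the collection's old validator state so an aborted write can put it back. A self-contained sketch of that idiom, with RecoveryUnit reduced to a stand-in that runs handlers in reverse order on rollback:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    class RecoveryUnit {
    public:
        void onRollback(std::function<void()> fn) {
            _handlers.push_back(std::move(fn));
        }
        void rollback() {
            for (auto it = _handlers.rbegin(); it != _handlers.rend(); ++it)
                (*it)();
        }

    private:
        std::vector<std::function<void()>> _handlers;
    };

    int main() {
        std::string validatorDoc = "{a: {$gt: 0}}";
        RecoveryUnit ru;
        ru.onRollback(
            [&validatorDoc, oldValidatorDoc = std::move(validatorDoc)]() mutable {
                validatorDoc = std::move(oldValidatorDoc);  // restore on abort
            });
        validatorDoc = "{a: {$gt: 5}}";  // speculative update
        ru.rollback();
        std::cout << validatorDoc << '\n';  // {a: {$gt: 0}}
    }
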
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index a156754bf14..1732dfef374 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -256,9 +256,9 @@ Status CollectionOptions::parse(const BSONObj& options, ParseKind kind) {
idIndex = std::move(tempIdIndex);
} else if (!createdOn24OrEarlier && !mongo::isGenericArgument(fieldName)) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << "The field '" << fieldName
- << "' is not a valid collection option. Options: "
- << options);
+ str::stream()
+ << "The field '" << fieldName
+ << "' is not a valid collection option. Options: " << options);
}
}
@@ -414,4 +414,4 @@ bool CollectionOptions::matchesStorageOptions(const CollectionOptions& other,
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 2558df0f5ea..6d63c00441d 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -143,4 +143,4 @@ struct CollectionOptions {
// The aggregation pipeline that defines this view.
BSONObj pipeline;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 1d15a592b1d..36154c5b9b0 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -236,11 +236,10 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
<< " - existing collection with conflicting UUID " << uuid
<< " is in a drop-pending state: " << *currentName;
return Result(Status(ErrorCodes::NamespaceExists,
- str::stream() << "existing collection "
- << currentName->toString()
- << " with conflicting UUID "
- << uuid.toString()
- << " is in a drop-pending state."));
+ str::stream()
+ << "existing collection " << currentName->toString()
+ << " with conflicting UUID " << uuid.toString()
+ << " is in a drop-pending state."));
}
// In the case of oplog replay, a future command may have created or renamed a
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 7f7c570d6c0..943350b0c89 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -123,9 +123,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
auto duplicates = _getNamesWithConflictingCasing_inlock(dbname);
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "db already exists with different case already have: ["
- << *duplicates.cbegin()
- << "] trying to create ["
- << dbname.toString()
+ << *duplicates.cbegin() << "] trying to create [" << dbname.toString()
<< "]",
duplicates.empty());
@@ -241,8 +239,8 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
// It is the caller's responsibility to ensure that no index builds are active in the
// database.
invariant(!coll->getIndexCatalog()->haveAnyIndexesInProgress(),
- str::stream() << "An index is building on collection '" << coll->ns()
- << "'.");
+ str::stream()
+ << "An index is building on collection '" << coll->ns() << "'.");
}
dbs.insert(i->first);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 3371914f7c0..d3e1bebdff2 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -366,8 +366,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(opCtx);
massert(ErrorCodes::BackgroundOperationInProgressForNamespace,
str::stream() << "cannot drop collection " << nss << " (" << uuidString << ") when "
- << numIndexesInProgress
- << " index builds in progress.",
+ << numIndexesInProgress << " index builds in progress.",
numIndexesInProgress == 0);
audit::logDropCollection(&cc(), nss.toString());
@@ -566,9 +565,7 @@ void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx,
// This check only applies for actual collections, not indexes or other types of ns.
uassert(17381,
str::stream() << "fully qualified namespace " << nss << " is too long "
- << "(max is "
- << NamespaceString::MaxNsCollectionLen
- << " bytes)",
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
!nss.isNormal() || nss.size() <= NamespaceString::MaxNsCollectionLen);
uassert(17316, "cannot create a blank collection", nss.coll() > 0);
@@ -623,8 +620,8 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
bool generatedUUID = false;
if (!optionsWithUUID.uuid) {
if (!canAcceptWrites) {
- std::string msg = str::stream() << "Attempted to create a new collection " << nss
- << " without a UUID";
+ std::string msg = str::stream()
+ << "Attempted to create a new collection " << nss << " without a UUID";
severe() << msg;
uasserted(ErrorCodes::InvalidOptions, msg);
} else {
@@ -726,8 +723,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
"model for collection name "
<< collectionNameModel
<< " must contain at least one percent sign within first "
- << maxModelLength
- << " characters.");
+ << maxModelLength << " characters.");
}
if (!_uniqueCollectionNamespacePseudoRandom) {
@@ -766,9 +762,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
return Status(
ErrorCodes::NamespaceExists,
str::stream() << "Cannot generate collection name for temporary collection with model "
- << collectionNameModel
- << " after "
- << numGenerationAttempts
+ << collectionNameModel << " after " << numGenerationAttempts
<< " attempts due to namespace conflicts with existing collections.");
}
@@ -897,8 +891,7 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
} else {
invariant(createCollection(opCtx, nss, collectionOptions, createDefaultIndexes, idIndex),
str::stream() << "Collection creation failed after validating options: " << nss
- << ". Options: "
- << collectionOptions.toBSON());
+ << ". Options: " << collectionOptions.toBSON());
}
return Status::OK();
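
makeUniqueCollectionNamespace, whose messages are rewrapped above, substitutes a random character for every percent sign in a model string until the resulting namespace is free. An illustrative sketch of the fill step only; the character set and model are assumptions, and the real code retries on conflicts and caps the number of attempts:

    #include <iostream>
    #include <random>
    #include <string>

    std::string fillModel(std::string model, std::mt19937& gen) {
        static const std::string kChars =
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
        std::uniform_int_distribution<std::size_t> pick(0, kChars.size() - 1);
        for (char& c : model)
            if (c == '%')
                c = kChars[pick(gen)];
        return model;
    }

    int main() {
        std::mt19937 gen{std::random_device{}()};
        std::cout << fillModel("tmp%%%%%.convertToCapped", gen) << '\n';
    }
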
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 81d2dcf777d..9a919eeabac 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -158,13 +158,13 @@ TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPending
// tests.
ON_BLOCK_EXIT([&wuow] { wuow.commit(); });
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss),
- AssertionException,
- ErrorCodes::DatabaseDropPending,
- (StringBuilder() << "Cannot create collection " << _nss
- << " - database is in the process of being dropped.")
- .stringData());
+ ASSERT_THROWS_CODE_AND_WHAT(db->createCollection(_opCtx.get(), _nss),
+ AssertionException,
+ ErrorCodes::DatabaseDropPending,
+ (StringBuilder()
+ << "Cannot create collection " << _nss
+ << " - database is in the process of being dropped.")
+ .stringData());
});
}
@@ -297,11 +297,10 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
auto indexCatalog = collection->getIndexCatalog();
ASSERT_EQUALS(indexCatalog->numIndexesInProgress(opCtx), 0);
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << "a_1"
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name"
+ << "a_1"
+ << "ns" << nss.ns());
auto indexBuildBlock =
indexCatalog->createIndexBuildBlock(opCtx, indexInfoObj, IndexBuildMethod::kHybrid);
@@ -418,8 +417,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss1 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss1.ns())) {
FAIL((StringBuilder() << "First generated namespace \"" << nss1.ns()
- << "\" does not match reqular expression \""
- << re.pattern()
+ << "\" does not match reqular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -436,8 +434,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss2 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss2.ns())) {
FAIL((StringBuilder() << "Second generated namespace \"" << nss2.ns()
- << "\" does not match reqular expression \""
- << re.pattern()
+ << "\" does not match reqular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -530,28 +527,28 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineMin) {
}
TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex) {
- writeConflictRetry(
- _opCtx.get(),
- "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
- _nss.ns(),
- [this] {
- AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
- auto db = autoDb.getDb();
- ASSERT_TRUE(db);
-
- WriteUnitOfWork wuow(_opCtx.get());
-
- CollectionOptions options;
- options.setNoIdIndex();
-
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss, options),
- AssertionException,
- 50001,
- (StringBuilder() << "autoIndexId:false is not allowed for collection " << _nss
- << " because it can be replicated")
- .stringData());
- });
+ writeConflictRetry(_opCtx.get(),
+ "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
+ _nss.ns(),
+ [this] {
+ AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ ASSERT_TRUE(db);
+
+ WriteUnitOfWork wuow(_opCtx.get());
+
+ CollectionOptions options;
+ options.setNoIdIndex();
+
+ ASSERT_THROWS_CODE_AND_WHAT(
+ db->createCollection(_opCtx.get(), _nss, options),
+ AssertionException,
+ 50001,
+ (StringBuilder()
+ << "autoIndexId:false is not allowed for collection " << _nss
+ << " because it can be replicated")
+ .stringData());
+ });
}
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index 27a7969c6d6..e27dfb11b66 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -84,4 +84,4 @@ public:
private:
boost::optional<DisableDocumentValidation> _documentValidationDisabler;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 2602c6e59d5..036e511fb7c 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -275,12 +275,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
}
if (!result.status.isOK()) {
- return result.status.withContext(
- str::stream() << "dropDatabase " << dbName << " failed waiting for "
- << numCollectionsToDrop
- << " collection drop(s) (most recent drop optime: "
- << awaitOpTime.toString()
- << ") to replicate.");
+ return result.status.withContext(str::stream()
+ << "dropDatabase " << dbName << " failed waiting for "
+ << numCollectionsToDrop
+ << " collection drop(s) (most recent drop optime: "
+ << awaitOpTime.toString() << ") to replicate.");
}
log() << "dropDatabase " << dbName << " - successfully dropped " << numCollectionsToDrop
@@ -301,8 +300,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Could not drop database " << dbName
<< " because it does not exist after dropping "
- << numCollectionsToDrop
- << " collection(s).");
+ << numCollectionsToDrop << " collection(s).");
}
bool userInitiatedWritesAndNotPrimary =
@@ -310,12 +308,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
- str::stream() << "Could not drop database " << dbName
- << " because we transitioned from PRIMARY to "
- << replCoord->getMemberState().toString()
- << " while waiting for "
- << numCollectionsToDrop
- << " pending collection drop(s).");
+ str::stream()
+ << "Could not drop database " << dbName
+ << " because we transitioned from PRIMARY to "
+ << replCoord->getMemberState().toString() << " while waiting for "
+ << numCollectionsToDrop << " pending collection drop(s).");
}
// _finishDropDatabase creates its own scope guard to ensure drop-pending is unset.
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index dcc43045646..16fc13ecb44 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -432,10 +432,10 @@ TEST_F(DropDatabaseTest,
auto status = dropDatabase(_opCtx.get(), _nss.db().toString());
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
- ASSERT_EQUALS(
- status.reason(),
- std::string(str::stream() << "Could not drop database " << _nss.db()
- << " because it does not exist after dropping 1 collection(s)."));
+ ASSERT_EQUALS(status.reason(),
+ std::string(str::stream()
+ << "Could not drop database " << _nss.db()
+ << " because it does not exist after dropping 1 collection(s)."));
ASSERT_FALSE(AutoGetDb(_opCtx.get(), _nss.db(), MODE_X).getDb());
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index ef6074eaaff..2e3f2383c14 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -105,7 +105,6 @@ Status wrappedRun(OperationContext* opCtx,
collection->uuid(),
desc->indexName(),
desc->infoObj());
-
});
anObjBuilder->append("msg", "non-_id indexes dropped for collection");
@@ -121,16 +120,14 @@ Status wrappedRun(OperationContext* opCtx,
opCtx, indexElem.embeddedObject(), false, &indexes);
if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "can't find index with key: "
- << indexElem.embeddedObject());
+ str::stream()
+ << "can't find index with key: " << indexElem.embeddedObject());
} else if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " indexes found for key: "
- << indexElem.embeddedObject()
+ str::stream() << indexes.size()
+ << " indexes found for key: " << indexElem.embeddedObject()
<< ", identify by name instead."
- << " Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ << " Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj());
}
@@ -166,23 +163,19 @@ Status wrappedRun(OperationContext* opCtx,
for (auto indexNameElem : indexElem.Array()) {
if (indexNameElem.type() != String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "dropIndexes " << collection->ns() << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": index name must be a string");
+ str::stream()
+ << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": index name must be a string");
}
auto indexToDelete = indexNameElem.String();
auto status = dropIndexByName(opCtx, collection, indexCatalog, indexToDelete);
if (!status.isOK()) {
- return status.withContext(str::stream() << "dropIndexes " << collection->ns()
- << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": "
- << indexToDelete);
+ return status.withContext(
+ str::stream() << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": " << indexToDelete);
}
}
diff --git a/src/mongo/db/catalog/health_log.cpp b/src/mongo/db/catalog/health_log.cpp
index 0bd4171c262..2703dee4aa1 100644
--- a/src/mongo/db/catalog/health_log.cpp
+++ b/src/mongo/db/catalog/health_log.cpp
@@ -48,7 +48,7 @@ CollectionOptions getOptions(void) {
options.cappedSize = kDefaultHealthlogSize;
return options;
}
-}
+} // namespace
HealthLog::HealthLog() : _writer(nss, getOptions(), kMaxBufferSize) {}
@@ -78,4 +78,4 @@ bool HealthLog::log(const HealthLogEntry& entry) {
}
const NamespaceString HealthLog::nss("local", "system.healthlog");
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/health_log.h b/src/mongo/db/catalog/health_log.h
index 2b312f741fa..ba2bcbf440a 100644
--- a/src/mongo/db/catalog/health_log.h
+++ b/src/mongo/db/catalog/health_log.h
@@ -91,4 +91,4 @@ public:
private:
DeferredWriter _writer;
};
-}
+} // namespace mongo
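
The bare closing braces replaced in these headers are part of a tree-wide sweep adding closing-namespace comments. A self-contained sketch of the convention follows; the constant's value is invented for illustration and is not the real healthlog default.

namespace mongo {
namespace {
// Anonymous namespaces close with a bare "// namespace" comment.
constexpr long long kDefaultHealthlogSize = 100'000'000;
}  // namespace

long long defaultHealthlogSize() {
    return kDefaultHealthlogSize;
}
// Named namespaces repeat their name in the closing comment.
}  // namespace mongo

int main() {
    return mongo::defaultHealthlogSize() > 0 ? 0 : 1;
}
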
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index adee836bfa5..c803d4aa600 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -110,7 +110,7 @@ Status IndexCatalogImpl::IndexBuildBlock::init(OperationContext* opCtx, Collecti
if (isBackgroundIndex) {
opCtx->recoveryUnit()->onCommit(
- [ entry = _entry, coll = collection ](boost::optional<Timestamp> commitTime) {
+ [entry = _entry, coll = collection](boost::optional<Timestamp> commitTime) {
// This will prevent the unfinished index from being visible on index iterators.
if (commitTime) {
entry->setMinimumVisibleSnapshot(commitTime.get());
@@ -169,7 +169,7 @@ void IndexCatalogImpl::IndexBuildBlock::success(OperationContext* opCtx, Collect
collection->indexBuildSuccess(opCtx, _entry);
opCtx->recoveryUnit()->onCommit(
- [ opCtx, entry = _entry, coll = collection ](boost::optional<Timestamp> commitTime) {
+ [opCtx, entry = _entry, coll = collection](boost::optional<Timestamp> commitTime) {
// Note: this runs after the WUOW commits but before we release our X lock on the
// collection. This means that any snapshot created after this must include the full
// index, and no one can try to read this index before we set the visibility.
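
These onCommit() hunks drop the padding that the previous formatter put inside lambda init-capture brackets. A runnable sketch of the new capture style; the RecoveryUnit below is a toy stand-in, and std::optional<int> replaces the boost::optional<Timestamp> that the real onCommit() callback receives.

#include <functional>
#include <iostream>
#include <optional>
#include <vector>

// Toy stand-in for the RecoveryUnit::onCommit() hook used above.
struct RecoveryUnit {
    std::vector<std::function<void(std::optional<int>)>> callbacks;
    void onCommit(std::function<void(std::optional<int>)> cb) {
        callbacks.push_back(std::move(cb));
    }
    void commit(std::optional<int> commitTime) {
        for (auto& cb : callbacks)
            cb(commitTime);
    }
};

int main() {
    RecoveryUnit ru;
    int entryId = 42;
    // New style: no padding inside the capture brackets, even for
    // init-captures: [entry = entryId], not [ entry = entryId ].
    ru.onCommit([entry = entryId](std::optional<int> commitTime) {
        if (commitTime)
            std::cout << "entry " << entry << " visible at " << *commitTime << '\n';
    });
    ru.commit(7);
    return 0;
}
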
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 197cf85bb70..ab263abaa42 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -86,8 +86,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
const auto& nss = collection->ns();
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X),
str::stream() << "Unable to set up index build " << buildUUID << ": collection "
- << nss.ns()
- << " is not locked in exclusive mode.");
+ << nss.ns() << " is not locked in exclusive mode.");
auto builder = _getBuilder(buildUUID);
diff --git a/src/mongo/db/catalog/index_builds_manager_test.cpp b/src/mongo/db/catalog/index_builds_manager_test.cpp
index 3ecb5dca2a1..df5e50d244c 100644
--- a/src/mongo/db/catalog/index_builds_manager_test.cpp
+++ b/src/mongo/db/catalog/index_builds_manager_test.cpp
@@ -76,8 +76,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index f10e51daa03..aa711498a71 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -317,8 +317,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
fassert(31164, status);
indexMetadataHasChanged = DurableCatalog::get(opCtx)->setIndexIsMultikey(
opCtx, _ns, _descriptor->indexName(), paths);
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
wuow.commit();
});
} else {
@@ -326,8 +328,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
opCtx, _ns, _descriptor->indexName(), paths);
}
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
// Within a multi-document transaction, reads should be able to see the effect of previous
// writes done within that transaction. If a previous write in a transaction has set the index
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 670622a17be..4e37bffe820 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -173,17 +173,16 @@ IndexCatalogEntry* IndexCatalogImpl::_setupInMemoryStructures(
}
if (!initFromDisk) {
- opCtx->recoveryUnit()->onRollback(
- [ this, opCtx, isReadyIndex, descriptor = descriptorPtr ] {
- // Need to preserve indexName as descriptor no longer exists after remove().
- const std::string indexName = descriptor->indexName();
- if (isReadyIndex) {
- _readyIndexes.remove(descriptor);
- } else {
- _buildingIndexes.remove(descriptor);
- }
- _collection->infoCache()->droppedIndex(opCtx, indexName);
- });
+ opCtx->recoveryUnit()->onRollback([this, opCtx, isReadyIndex, descriptor = descriptorPtr] {
+ // Need to preserve indexName as descriptor no longer exists after remove().
+ const std::string indexName = descriptor->indexName();
+ if (isReadyIndex) {
+ _readyIndexes.remove(descriptor);
+ } else {
+ _buildingIndexes.remove(descriptor);
+ }
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
+ });
}
return save;
@@ -207,8 +206,7 @@ Status IndexCatalogImpl::checkUnfinished() const {
return Status(ErrorCodes::InternalError,
str::stream() << "IndexCatalog has left over indexes that must be cleared"
- << " ns: "
- << _collection->ns());
+ << " ns: " << _collection->ns());
}
std::unique_ptr<IndexCatalog::IndexIterator> IndexCatalogImpl::getIndexIterator(
@@ -244,8 +242,7 @@ string IndexCatalogImpl::_getAccessMethodName(const BSONObj& keyPattern) const {
// supports an index plugin unsupported by this version.
uassert(17197,
str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index "
- << keyPattern,
+ << "in index " << keyPattern,
IndexNames::isKnownName(pluginName));
return pluginName;
@@ -432,10 +429,8 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(_collection->numRecords(opCtx) == 0,
str::stream() << "Collection must be empty. Collection: " << _collection->ns()
- << " UUID: "
- << _collection->uuid()
- << " Count: "
- << _collection->numRecords(opCtx));
+ << " UUID: " << _collection->uuid()
+ << " Count: " << _collection->numRecords(opCtx));
_checkMagic();
Status status = checkUnfinished();
@@ -545,8 +540,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (!IndexDescriptor::isIndexVersionSupported(indexVersion)) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "this version of mongod cannot build new indexes "
- << "of version number "
- << static_cast<int>(indexVersion));
+ << "of version number " << static_cast<int>(indexVersion));
}
if (nss.isOplog())
@@ -563,9 +557,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "the \"ns\" field of the index spec '"
<< specNamespace.valueStringData()
- << "' does not match the collection name '"
- << nss
- << "'");
+ << "' does not match the collection name '" << nss << "'");
}
// logical name of the index
@@ -595,16 +587,15 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (indexNamespace.size() > NamespaceString::MaxNsLen)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "namespace name generated from index name \""
- << indexNamespace
- << "\" is too long (127 byte max)");
+ << indexNamespace << "\" is too long (127 byte max)");
}
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = index_key_validate::validateKeyPattern(key, indexVersion);
if (!keyStatus.isOK()) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "bad index key pattern " << key << ": "
- << keyStatus.reason());
+ str::stream()
+ << "bad index key pattern " << key << ": " << keyStatus.reason());
}
const string pluginName = IndexNames::findPluginName(key);
@@ -633,18 +624,16 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (static_cast<IndexVersion>(vElt.numberInt()) < IndexVersion::kV2) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Index version " << vElt.fieldNameStringData() << "="
- << vElt.numberInt()
- << " does not support the '"
- << collationElement.fieldNameStringData()
- << "' option"};
+ << vElt.numberInt() << " does not support the '"
+ << collationElement.fieldNameStringData() << "' option"};
}
if ((pluginName != IndexNames::BTREE) && (pluginName != IndexNames::GEO_2DSPHERE) &&
(pluginName != IndexNames::HASHED) && (pluginName != IndexNames::WILDCARD)) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' does not support collation: "
- << collator->getSpec().toBSON());
+ str::stream()
+ << "Index type '" << pluginName
+ << "' does not support collation: " << collator->getSpec().toBSON());
}
}
@@ -665,8 +654,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (spec.getField("expireAfterSeconds")) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' cannot be a TTL index");
+ str::stream()
+ << "Index type '" << pluginName << "' cannot be a TTL index");
}
}
@@ -776,21 +765,18 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
<< "An index with the same key pattern, but a different "
<< "collation already exists with the same name. Try again with "
<< "a unique name. "
- << "Existing index: "
- << desc->infoObj()
- << " Requested index: "
- << spec);
+ << "Existing index: " << desc->infoObj()
+ << " Requested index: " << spec);
}
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() != key) ||
SimpleBSONObjComparator::kInstance.evaluate(
desc->infoObj().getObjectField("collation") != collation)) {
return Status(ErrorCodes::IndexKeySpecsConflict,
- str::stream() << "Index must have unique name."
- << "The existing index: "
- << desc->infoObj()
- << " has the same name as the requested index: "
- << spec);
+ str::stream()
+ << "Index must have unique name."
+ << "The existing index: " << desc->infoObj()
+ << " has the same name as the requested index: " << spec);
}
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
@@ -816,9 +802,9 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index: " << spec
- << " already exists with different options: "
- << desc->infoObj());
+ str::stream()
+ << "Index: " << spec
+ << " already exists with different options: " << desc->infoObj());
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index with name: " << name
@@ -843,8 +829,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
<< "found existing text index \""
- << textIndexes[0]->indexName()
- << "\"");
+ << textIndexes[0]->indexName() << "\"");
}
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index 87a9bc74f74..3a2f297cf1a 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -459,8 +459,7 @@ BSONObj IndexConsistency::_generateInfo(const int& indexNumber,
if (idKey) {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "idKey" << *idKey
- << "indexKey"
- << rehydratedKey);
+ << "indexKey" << rehydratedKey);
} else {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "indexKey"
<< rehydratedKey);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index c9cd4223504..56d521edbc4 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -108,7 +108,7 @@ static const std::set<StringData> allowedIdIndexFieldNames = {
IndexDescriptor::kNamespaceFieldName,
// Index creation under legacy writeMode can result in an index spec with an _id field.
"_id"};
-}
+} // namespace
Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion indexVersion) {
const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
@@ -134,8 +134,7 @@ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion inde
if (keyElement.type() == BSONType::Object || keyElement.type() == BSONType::Array) {
return {code,
str::stream() << "Values in index key pattern cannot be of type "
- << typeName(keyElement.type())
- << " for index version v:"
+ << typeName(keyElement.type()) << " for index version v:"
<< static_cast<int>(indexVersion)};
}
@@ -276,9 +275,9 @@ StatusWith<BSONObj> validateIndexSpec(
if (IndexDescriptor::kKeyPatternFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kKeyPatternFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kKeyPatternFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
std::vector<StringData> keys;
@@ -313,18 +312,18 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kIndexNameFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexNameFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexNameFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
hasIndexNameField = true;
} else if (IndexDescriptor::kNamespaceFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kNamespaceFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kNamespaceFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
StringData ns = indexSpecElem.valueStringData();
@@ -336,22 +335,19 @@ StatusWith<BSONObj> validateIndexSpec(
if (ns != expectedNamespace.ns()) {
return {ErrorCodes::BadValue,
- str::stream() << "The value of the field '"
- << IndexDescriptor::kNamespaceFieldName
- << "' ("
- << ns
- << ") doesn't match the namespace '"
- << expectedNamespace
- << "'"};
+ str::stream()
+ << "The value of the field '" << IndexDescriptor::kNamespaceFieldName
+ << "' (" << ns << ") doesn't match the namespace '" << expectedNamespace
+ << "'"};
}
hasNamespaceField = true;
} else if (IndexDescriptor::kIndexVersionFieldName == indexSpecElemFieldName) {
if (!indexSpecElem.isNumber()) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexVersionFieldName
- << "' must be a number, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexVersionFieldName
+ << "' must be a number, but got " << typeName(indexSpecElem.type())};
}
auto requestedIndexVersionAsInt = representAs<int>(indexSpecElem.number());
@@ -375,9 +371,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kCollationFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kCollationFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kCollationFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
if (indexSpecElem.Obj().isEmpty()) {
@@ -390,10 +386,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kPartialFilterExprFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '"
- << IndexDescriptor::kPartialFilterExprFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPartialFilterExprFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
// Just use the simple collator, even though the index may have a separate collation
@@ -419,10 +414,9 @@ StatusWith<BSONObj> validateIndexSpec(
const auto key = indexSpec.getObjectField(IndexDescriptor::kKeyPatternFieldName);
if (IndexNames::findPluginName(key) != IndexNames::WILDCARD) {
return {ErrorCodes::BadValue,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed in an '"
- << IndexNames::WILDCARD
- << "' index"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed in an '" << IndexNames::WILDCARD << "' index"};
}
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
@@ -432,10 +426,10 @@ StatusWith<BSONObj> validateIndexSpec(
}
if (!key.hasField("$**")) {
return {ErrorCodes::FailedToParse,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed when '"
- << IndexDescriptor::kKeyPatternFieldName
- << "' is {\"$**\": ±1}"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed when '" << IndexDescriptor::kKeyPatternFieldName
+ << "' is {\"$**\": ±1}"};
}
if (indexSpecElem.embeddedObject().isEmpty()) {
@@ -478,10 +472,8 @@ StatusWith<BSONObj> validateIndexSpec(
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
<< "; cannot create an index with the '"
- << IndexDescriptor::kCollationFieldName
- << "' option and "
- << IndexDescriptor::kIndexVersionFieldName
- << "="
+ << IndexDescriptor::kCollationFieldName << "' option and "
+ << IndexDescriptor::kIndexVersionFieldName << "="
<< static_cast<int>(*resolvedIndexVersion)};
}
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index bbb55b5281a..d61cbb8e0d7 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -127,8 +127,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueFailsForV2Indexes) {
ASSERT_EQ(ErrorCodes::CannotCreateIndex,
validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV2));
}
@@ -137,8 +136,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueSucceedsForV1Indexes) {
ASSERT_OK(validateKeyPattern(BSON("x" << false), IndexVersion::kV1));
ASSERT_OK(validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV1));
}
diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp
index 560f4820579..6b472d09073 100644
--- a/src/mongo/db/catalog/index_spec_validate_test.cpp
+++ b/src/mongo/db/catalog/index_spec_validate_test.cpp
@@ -50,8 +50,8 @@
namespace mongo {
namespace {
-using index_key_validate::validateIndexSpec;
using index_key_validate::validateIdIndexSpec;
+using index_key_validate::validateIndexSpec;
using index_key_validate::validateIndexSpecCollation;
using unittest::EnsureFCV;
@@ -140,16 +140,14 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsNotAString) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << 1),
+ << "ns" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << BSONObj()),
+ << "ns" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -181,8 +179,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceDoesNotMatch) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.coll()),
+ << "ns" << kTestNamespace.coll()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -191,8 +188,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -200,10 +196,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -215,10 +208,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1),
+ << "ns" << kTestNamespace.ns() << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -228,8 +218,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
<< "indexName"
<< "ns"
<< "test.index_spec_validate"
- << "v"
- << 1)),
+ << "v" << 1)),
sorted(result.getValue()));
}
@@ -246,8 +235,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotANumber) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << BSONObj()),
+ << "v" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -257,32 +245,28 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotRepresentableAsInt) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2.2),
+ << "v" << 2.2),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::nan("1")),
+ << "v" << std::nan("1")),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<double>::infinity()),
+ << "v" << std::numeric_limits<double>::infinity()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<long long>::max()),
+ << "v" << std::numeric_limits<long long>::max()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -292,8 +276,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsV0) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 0),
+ << "v" << 0),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -303,9 +286,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 3
- << "collation"
+ << "v" << 3 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -315,8 +296,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << -3LL),
+ << "v" << -3LL),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -325,8 +305,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -334,17 +313,13 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2LL),
+ << "v" << 2LL),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -352,10 +327,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2LL)),
+ << "ns" << kTestNamespace.ns() << "v" << 2LL)),
sorted(result.getValue()));
}
@@ -363,8 +335,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()),
+ << "ns" << kTestNamespace.ns()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -372,10 +343,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -387,8 +355,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -396,10 +363,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
}
@@ -408,8 +372,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << 1),
+ << "collation" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
@@ -424,8 +387,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONArray()),
+ << "collation" << BSONArray()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -435,8 +397,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsEmpty) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONObj()),
+ << "collation" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -449,8 +410,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsPresentAndVersionIsLessTh
<< "collation"
<< BSON("locale"
<< "simple")
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -459,9 +419,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "simple")),
kTestNamespace,
@@ -471,11 +429,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "simple"))),
sorted(result.getValue()));
@@ -483,9 +437,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
@@ -494,11 +446,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true))),
sorted(result.getValue()));
}
@@ -507,9 +455,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -519,11 +465,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "en"))),
sorted(result.getValue()));
@@ -533,10 +475,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "unknownField"
- << 1),
+ << "v" << 2 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -546,10 +485,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1
- << "unknownField"
- << 1),
+ << "v" << 1 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -559,95 +495,59 @@ TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfKeyPatternIsIncorrectForIdIndex) {
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("_id" << -1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("a" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfKeyPatternCorrectForIdIndex) {
ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "anyname"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfFieldNotAllowedForIdIndex) {
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "background"
- << false)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "background" << false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "unique"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "unique"
<< true)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "partialFilterExpression"
- << BSON("a" << 5))));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "partialFilterExpression" << BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "sparse"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "sparse"
<< false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "expireAfterSeconds"
- << 3600)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "expireAfterSeconds" << 3600)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "storageEngine"
- << BSONObj())));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "storageEngine" << BSONObj())));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfAllFieldsAllowedForIdIndex) {
- ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "simple"))));
+ ASSERT_OK(
+ validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "simple"))));
}
TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
@@ -659,10 +559,7 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "mock_reverse_string")),
@@ -670,34 +567,21 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
@@ -709,10 +593,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "simple")),
@@ -722,10 +603,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
}
@@ -738,50 +616,33 @@ TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultI
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2),
+ << "ns" << kTestNamespace.ns() << "v" << 2),
&defaultCollator);
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecPartialFilterTest, FailsIfPartialFilterIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << 1),
+ << "partialFilterExpression" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus(), ErrorCodes::TypeMismatch);
@@ -802,8 +663,7 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << BSON("a" << 1)),
+ << "partialFilterExpression" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -811,25 +671,25 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
TEST(IndexSpecWildcard, SucceedsWithInclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 1)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 1)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
TEST(IndexSpecWildcard, SucceedsWithExclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 0 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 0 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
@@ -895,13 +755,13 @@ TEST(IndexSpecWildcard, FailsWithImproperFeatureCompatabilityVersion) {
TEST(IndexSpecWildcard, FailsWithMixedProjection) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), 40178);
}
@@ -923,8 +783,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionPluginNotWildcard) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
@@ -935,8 +794,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << 4),
+ << "wildcardProjection" << 4),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::TypeMismatch);
@@ -947,8 +805,7 @@ TEST(IndexSpecWildcard, FailsWithEmptyProjection) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSONObj()),
+ << "wildcardProjection" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -959,8 +816,7 @@ TEST(IndexSpecWildcard, FailsWhenInclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -971,8 +827,7 @@ TEST(IndexSpecWildcard, FailsWhenExclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("b" << 0)),
+ << "wildcardProjection" << BSON("b" << 0)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
diff --git a/src/mongo/db/catalog/index_timestamp_helper.h b/src/mongo/db/catalog/index_timestamp_helper.h
index 581b1bd4740..9ae4457e409 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.h
+++ b/src/mongo/db/catalog/index_timestamp_helper.h
@@ -55,6 +55,6 @@ void setGhostCommitTimestampForWrite(OperationContext* opCtx, const NamespaceStr
* also throw WriteConflictException.
*/
bool setGhostCommitTimestampForCatalogWrite(OperationContext* opCtx, const NamespaceString& nss);
-};
+}; // namespace IndexTimestampHelper
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 287f34bb57e..920f6773f83 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -135,8 +135,8 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx, Collection* col
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
opCtx->getServiceContext()->getOpObserver()->onOpMessage(
opCtx,
- BSON("msg" << std::string(str::stream() << "Failing index builds. Coll: "
- << nss)));
+ BSON("msg" << std::string(str::stream()
+ << "Failing index builds. Coll: " << nss)));
} else {
// Simply get a timestamp to write with here; we can't write to the oplog.
repl::UnreplicatedWritesBlock uwb(opCtx);
@@ -195,7 +195,7 @@ MultiIndexBlock::OnInitFn MultiIndexBlock::kNoopOnInitFn =
MultiIndexBlock::OnInitFn MultiIndexBlock::makeTimestampedIndexOnInitFn(OperationContext* opCtx,
const Collection* coll) {
- return [ opCtx, ns = coll->ns() ](std::vector<BSONObj> & specs)->Status {
+ return [opCtx, ns = coll->ns()](std::vector<BSONObj>& specs) -> Status {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (opCtx->recoveryUnit()->getCommitTimestamp().isNull() &&
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
@@ -226,13 +226,11 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
if (State::kAborted == _getState()) {
return {ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot initialize index builder: "
- << collection->ns()
+ << ". Cannot initialize index builder: " << collection->ns()
<< (collection->uuid()
? (" (" + collection->uuid()->toString() + "): ")
: ": ")
- << indexSpecs.size()
- << " provided. First index spec: "
+ << indexSpecs.size() << " provided. First index spec: "
<< (indexSpecs.empty() ? BSONObj() : indexSpecs[0])};
}
@@ -741,8 +739,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
return {
ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot commit index builder: "
- << collection->ns()
+ << ". Cannot commit index builder: " << collection->ns()
<< (_collectionUUID ? (" (" + _collectionUUID->toString() + ")") : "")};
}
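
The makeTimestampedIndexOnInitFn() hunk above shows the same capture cleanup applied to a lambda with a trailing return type, now written ") -> Status {" on one line. A compilable approximation follows; Status and the spec type are deliberately simplified stand-ins for the MongoDB originals.

#include <string>
#include <vector>

// Bare stand-in for mongo::Status; only what the sketch needs.
struct Status {
    bool ok;
    std::string reason;
};

// New style: captures without inner padding, trailing return type kept on
// the same line as the parameter list.
auto makeOnInitFn(std::string ns) {
    return [ns = std::move(ns)](std::vector<std::string>& specs) -> Status {
        if (specs.empty())
            return {false, "no index specs provided for " + ns};
        return {true, ""};
    };
}

int main() {
    auto onInit = makeOnInitFn("test.coll");
    std::vector<std::string> specs{"a_1"};
    return onInit(specs).ok ? 0 : 1;
}
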
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
index 064b0f4f359..c1cfd965afd 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
@@ -60,7 +60,7 @@ KeyString makeWildCardMultikeyMetadataKeyString(const BSONObj& indexKey) {
const RecordId multikeyMetadataRecordId(RecordId::ReservedId::kWildcardMultikeyMetadataId);
return {KeyString::kLatestVersion, indexKey, multikeyMetadataOrd, multikeyMetadataRecordId};
}
-}
+} // namespace
Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const RecordData& record,
@@ -117,9 +117,9 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
{documentKeySet.begin(), documentKeySet.end()},
{multikeyMetadataKeys.begin(), multikeyMetadataKeys.end()},
multikeyPaths)) {
- std::string msg = str::stream() << "Index " << descriptor->indexName()
- << " is not multi-key, but a multikey path "
- << " is present in document " << recordId;
+ std::string msg = str::stream()
+ << "Index " << descriptor->indexName() << " is not multi-key, but a multikey path "
+ << " is present in document " << recordId;
curRecordResults.errors.push_back(msg);
curRecordResults.valid = false;
}
@@ -203,9 +203,9 @@ void RecordStoreValidateAdaptor::traverseIndex(const IndexAccessMethod* iam,
}
if (results && _indexConsistency->getMultikeyMetadataPathCount(indexNumber) > 0) {
- results->errors.push_back(
- str::stream() << "Index '" << descriptor->indexName()
- << "' has one or more missing multikey metadata index keys");
+ results->errors.push_back(str::stream()
+ << "Index '" << descriptor->indexName()
+ << "' has one or more missing multikey metadata index keys");
results->valid = false;
}
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.h b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
index 4885b98d66f..00ab221e89a 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.h
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
@@ -101,4 +101,4 @@ private:
IndexCatalog* _indexCatalog; // Not owned.
ValidateResultsMap* _indexNsResultsMap; // Not owned.
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index f756dab37e9..8e087d7e213 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -203,15 +203,8 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate a temporary collection name for the target "
- << targetNs
- << " ("
- << targetUUID
- << ") so that the source"
- << sourceNs
- << " ("
- << sourceUUID
- << ") could be renamed to "
- << targetNs);
+ << targetNs << " (" << targetUUID << ") so that the source" << sourceNs
+ << " (" << sourceUUID << ") could be renamed to " << targetNs);
}
const auto& tmpName = tmpNameResult.getValue();
const bool stayTemp = true;
@@ -339,9 +332,10 @@ Status renameCollectionWithinDB(OperationContext* opCtx,
boost::optional<Lock::CollectionLock> targetLock;
// To prevent deadlock, always lock system.views collection in the end because concurrent
// view-related operations always lock system.views in the end.
- if (!source.isSystemDotViews() && (target.isSystemDotViews() ||
- ResourceId(RESOURCE_COLLECTION, source.ns()) <
- ResourceId(RESOURCE_COLLECTION, target.ns()))) {
+ if (!source.isSystemDotViews() &&
+ (target.isSystemDotViews() ||
+ ResourceId(RESOURCE_COLLECTION, source.ns()) <
+ ResourceId(RESOURCE_COLLECTION, target.ns()))) {
// To prevent deadlock, always lock source and target in ascending resourceId order.
sourceLock.emplace(opCtx, source, MODE_X);
targetLock.emplace(opCtx, target, MODE_X);
@@ -546,8 +540,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate temporary collection name to rename " << source
- << " to "
- << target);
+ << " to " << target);
}
const auto& tmpName = tmpNameResult.getValue();
@@ -639,7 +632,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
*(tmpColl->uuid()),
indexToCopy,
false // fromMigrate
- );
+ );
auto indexResult =
tmpIndexCatalog->createIndexOnEmptyCollection(opCtx, indexToCopy);
if (!indexResult.isOK()) {
@@ -700,7 +693,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
cursor->save();
// When this exits via success or WCE, we need to restore the cursor.
- ON_BLOCK_EXIT([ opCtx, ns = tmpName.ns(), &cursor ]() {
+ ON_BLOCK_EXIT([opCtx, ns = tmpName.ns(), &cursor]() {
writeConflictRetry(
opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); });
});
@@ -867,9 +860,7 @@ Status renameCollectionForRollback(OperationContext* opCtx,
invariant(source->db() == target.db(),
str::stream() << "renameCollectionForRollback: source and target namespaces must "
"have the same database. source: "
- << *source
- << ". target: "
- << target);
+ << *source << ". target: " << target);
log() << "renameCollectionForRollback: rename " << *source << " (" << uuid << ") to " << target
<< ".";
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index ed6dfe5de27..ace8cd0957b 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -329,8 +329,8 @@ void _createCollection(OperationContext* opCtx,
<< " does not exist.";
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss, options)) << "Failed to create collection "
- << nss << " due to unknown error.";
+ ASSERT_TRUE(db->createCollection(opCtx, nss, options))
+ << "Failed to create collection " << nss << " due to unknown error.";
wuow.commit();
});
@@ -414,11 +414,8 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx,
ASSERT_TRUE(collection) << "Cannot create index on empty collection " << nss
<< " because collection " << nss << " does not exist.";
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << indexName
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name" << indexName << "ns" << nss.ns());
auto indexCatalog = collection->getIndexCatalog();
WriteUnitOfWork wuow(opCtx);
@@ -733,8 +730,8 @@ TEST_F(RenameCollectionTest, RenameCollectionMakesTargetCollectionDropPendingIfD
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT(_opObserver->onRenameCollectionDropTarget);
@@ -758,8 +755,8 @@ TEST_F(RenameCollectionTest,
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT_FALSE(_opObserver->onRenameCollectionDropTarget);
@@ -845,9 +842,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
_createCollectionWithUUID(_opCtx.get(), _targetNss);
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << UUID::gen());
- auto cmd =
- BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
ASSERT_OK(renameCollectionForApplyOps(
_opCtx.get(), missingSourceNss.db().toString(), uuidDoc["ui"], cmd, {}));
ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss));
@@ -885,9 +881,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << _createCollectionWithUUID(_opCtx.get(), dropPendingNss));
- auto cmd =
- BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
repl::UnreplicatedWritesBlock uwb(_opCtx.get());
repl::OpTime renameOpTime = {Timestamp(Seconds(200), 1U), 1LL};
@@ -930,8 +925,8 @@ void _testRenameCollectionStayTemp(OperationContext* opCtx,
RenameCollectionOptions options;
options.stayTemp = stayTemp;
ASSERT_OK(renameCollection(opCtx, sourceNss, targetNss, options));
- ASSERT_FALSE(_collectionExists(opCtx, sourceNss)) << "source collection " << sourceNss
- << " still exists after successful rename";
+ ASSERT_FALSE(_collectionExists(opCtx, sourceNss))
+ << "source collection " << sourceNss << " still exists after successful rename";
if (!isSourceCollectionTemporary) {
ASSERT_FALSE(_isTempCollection(opCtx, targetNss))
@@ -1018,8 +1013,8 @@ void _testRenameCollectionAcrossDatabaseOplogEntries(
_insertDocument(opCtx, sourceNss, BSON("_id" << 0));
oplogEntries->clear();
if (forApplyOps) {
- auto cmd = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "dropTarget" << true);
+ auto cmd = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "dropTarget" << true);
ASSERT_OK(renameCollectionForApplyOps(opCtx, sourceNss.db().toString(), {}, cmd, {}));
} else {
RenameCollectionOptions options;
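The BSON(...) chains reflowed in these tests alternate field names and values through operator<<, so the new line breaks carry no meaning. A rough stand-in for what one such chain denotes, assuming only the standard library (the real builder produces BSON, not a std::map):

    #include <map>
    #include <string>

    int main() {
        std::map<std::string, std::string> cmd{
            {"renameCollection", "db.source"},
            {"to", "db.target"},
            {"dropTarget", "true"},
        };
        return cmd.size() == 3 ? 0 : 1;  // field names mirror the test's command
    }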
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index cf3dd0f3625..c449932f653 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -237,7 +237,7 @@ public:
KeyPartitioner()(partitioned_detail::getKey(value), nPartitions);
this->_partitionedContainer->_partitions[partitionId].insert(std::move(value));
}
- void insert(value_type)&& = delete;
+ void insert(value_type) && = delete;
/**
* Erases one entry from the partitioned structure, returns the number of entries removed.
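The only change to partitioned.h is the space clang-format now requires before the ref-qualifier in 'void insert(value_type) && = delete;'. A minimal sketch of what that deleted rvalue-qualified overload accomplishes (Guard is an illustrative name):

    #include <vector>

    class Guard {
    public:
        void insert(int v) & {  // callable only on lvalues
            _vals.push_back(v);
        }
        void insert(int) && = delete;  // rejected on temporaries, which would
                                       // release their lock before use

    private:
        std::vector<int> _vals;
    };

    int main() {
        Guard g;
        g.insert(1);           // compiles: g is an lvalue
        // Guard{}.insert(2);  // would not compile: rvalue overload is deleted
        return 0;
    }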
diff --git a/src/mongo/db/catalog/util/partitioned_test.cpp b/src/mongo/db/catalog/util/partitioned_test.cpp
index 06de76bfc26..1cd235c95d6 100644
--- a/src/mongo/db/catalog/util/partitioned_test.cpp
+++ b/src/mongo/db/catalog/util/partitioned_test.cpp
@@ -237,7 +237,6 @@ TEST(PartitionedConcurrency, ShouldProtectConcurrentAccesses) {
AtomicWord<unsigned> ready{0};
for (size_t threadId = 1; threadId <= numThreads; ++threadId) {
auto workerThreadBody = [&, threadId, opsPerThread]() {
-
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
while (ready.load() < numThreads) {
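Aside from the deleted blank line, the surrounding test code shows a busy-wait start barrier. A self-contained sketch of the same pattern with std::atomic in place of mongo's AtomicWord:

    #include <atomic>
    #include <thread>
    #include <vector>

    int main() {
        constexpr unsigned numThreads = 4;
        std::atomic<unsigned> ready{0};
        std::vector<std::thread> workers;
        for (unsigned i = 0; i < numThreads; ++i) {
            workers.emplace_back([&] {
                ready.fetch_add(1);
                while (ready.load() < numThreads) {
                }  // busy-wait until every worker has arrived
                // ... the concurrent accesses under test would run here ...
            });
        }
        for (auto& t : workers)
            t.join();
        return 0;
    }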
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 3b2b4ed417a..40b6a883ef7 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -123,8 +123,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
}
@@ -158,8 +157,7 @@ NamespaceString AutoGetCollection::resolveNamespaceStringOrUUID(OperationContext
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "UUID " << nsOrUUID.toString() << " specified in " << nsOrUUID.dbname()
- << " resolved to a collection in a different database: "
- << *resolvedNss,
+ << " resolved to a collection in a different database: " << *resolvedNss,
resolvedNss->db() == nsOrUUID.dbname());
return *resolvedNss;
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index b177bc24e07..358de1437de 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -55,9 +55,7 @@ thread_local ServiceContext::UniqueClient currentClient;
void invariantNoCurrentClient() {
invariant(!haveClient(),
str::stream() << "Already have client on this thread: " //
- << '"'
- << Client::getCurrent()->desc()
- << '"');
+ << '"' << Client::getCurrent()->desc() << '"');
}
} // namespace
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 0ebcff34f09..3e93171254b 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -299,7 +299,7 @@ void _appendCursorStats(BSONObjBuilder& b) {
b.appendNumber("totalNoTimeout", cursorStatsOpenNoTimeout.get());
b.appendNumber("timedOut", cursorStatsTimedOut.get());
}
-}
+} // namespace
void startClientCursorMonitor() {
clientCursorMonitor.go();
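Many hunks in this commit, like the one above, only append the closing-namespace comments the style expects. The convention, which newer clang-format can maintain automatically via its FixNamespaceComments option, looks like this:

    namespace mongo {
    namespace {
    int internalCounter = 0;  // internal linkage, file-local
    }  // namespace
    }  // namespace mongo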
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index bf6387956ed..f79fa7067c3 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -132,8 +132,7 @@ struct Cloner::Fun {
uassert(
ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns(),
+ << " to " << to_collection.ns(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -163,13 +162,12 @@ struct Cloner::Fun {
db->userCreateNS(
opCtx, to_collection, collectionOptions, createDefaultIndexes, indexSpec),
str::stream() << "collection creation failed during clone ["
- << to_collection.ns()
- << "]");
+ << to_collection.ns() << "]");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
- str::stream() << "Missing collection during clone [" << to_collection.ns()
- << "]");
+ str::stream()
+ << "Missing collection during clone [" << to_collection.ns() << "]");
});
}
@@ -209,8 +207,8 @@ struct Cloner::Fun {
collection = db->getCollection(opCtx, to_collection);
uassert(28594,
- str::stream() << "Collection " << to_collection.ns()
- << " dropped while cloning",
+ str::stream()
+ << "Collection " << to_collection.ns() << " dropped while cloning",
collection != NULL);
}
@@ -292,7 +290,7 @@ struct Cloner::Fun {
};
/* copy the specified collection
-*/
+ */
void Cloner::copy(OperationContext* opCtx,
const string& toDBName,
const NamespaceString& from_collection,
@@ -326,10 +324,7 @@ void Cloner::copy(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " with filter "
- << query.toString(),
+ << " to " << to_collection.ns() << " with filter " << query.toString(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
}
@@ -350,9 +345,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)",
+ << " to " << to_collection.ns() << " (Cloner)",
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -381,11 +374,9 @@ void Cloner::copyIndexes(OperationContext* opCtx,
createDefaultIndexes,
fixIndexSpec(to_collection.db().toString(),
getIdIndexSpec(from_indexes))),
- str::stream() << "Collection creation failed while copying indexes from "
- << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)");
+ str::stream()
+ << "Collection creation failed while copying indexes from "
+ << from_collection.ns() << " to " << to_collection.ns() << " (Cloner)");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
@@ -602,8 +593,7 @@ Status Cloner::createCollectionsForDb(
// we're trying to create already exists.
return Status(ErrorCodes::NamespaceExists,
str::stream() << "unsharded collection with same namespace "
- << nss.ns()
- << " already exists.");
+ << nss.ns() << " already exists.");
}
// If the collection is sharded and a collection with the same name already
@@ -625,12 +615,9 @@ Status Cloner::createCollectionsForDb(
return Status(
ErrorCodes::InvalidOptions,
str::stream()
- << "sharded collection with same namespace "
- << nss.ns()
+ << "sharded collection with same namespace " << nss.ns()
<< " already exists, but options don't match. Existing options are "
- << existingOpts
- << " and new options are "
- << options);
+ << existingOpts << " and new options are " << options);
}
// If the collection does not already exist and is sharded, we create a new
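The Cloner hunks reflow uassert and Status messages built with str::stream(), several of which attach context to an inner error. A minimal sketch of that context-layering pattern, assuming only the standard library (StatusSketch is a stand-in; mongo's Status also carries error codes):

    #include <iostream>
    #include <string>

    struct StatusSketch {
        bool ok;
        std::string reason;

        StatusSketch withContext(const std::string& ctx) const {
            // prepend context only when there is an error to explain
            return ok ? *this : StatusSketch{false, ctx + " :: " + reason};
        }
    };

    int main() {
        StatusSketch s{false, "namespace exists"};
        auto wrapped =
            s.withContext("collection creation failed during clone [test.coll]");
        std::cout << wrapped.reason << '\n';
        return 0;
    }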
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index b0eeddefe5b..152d71f2a7c 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -52,10 +52,10 @@
namespace mongo {
-using std::unique_ptr;
+using std::endl;
using std::string;
using std::stringstream;
-using std::endl;
+using std::unique_ptr;
/**
* The cloneCollection command is deprecated.
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index e2fab366906..42b1ff38dfe 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -118,8 +118,7 @@ public:
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from << " to "
- << to
- << " (as capped)");
+ << to << " (as capped)");
}
Database* const db = autoDb.getDb();
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index d78af120927..c97db6c58c5 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -149,4 +149,4 @@ public:
}
};
static CompactCmd compactCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index cf470ebc6ae..04ca3a12f83 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -130,4 +130,4 @@ public:
return true;
}
} cmdConnectionStatus;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 82a6408b6e8..0cf4eda8067 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -51,9 +51,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Failpoint which causes to hang "count" cmd after acquiring the DB lock.
MONGO_FAIL_POINT_DEFINE(hangBeforeCollectionCount);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 5bc8dc8d00a..d14f5505a1d 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -109,9 +109,9 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
if (kIndexesFieldName == cmdElemFieldName) {
if (cmdElem.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << kIndexesFieldName
- << "' must be an array, but got "
- << typeName(cmdElem.type())};
+ str::stream()
+ << "The field '" << kIndexesFieldName << "' must be an array, but got "
+ << typeName(cmdElem.type())};
}
for (auto&& indexesElem : cmdElem.Obj()) {
@@ -163,16 +163,15 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
continue;
} else {
return {ErrorCodes::BadValue,
- str::stream() << "Invalid field specified for " << kCommandName << " command: "
- << cmdElemFieldName};
+ str::stream() << "Invalid field specified for " << kCommandName
+ << " command: " << cmdElemFieldName};
}
}
if (!hasIndexesField) {
return {ErrorCodes::FailedToParse,
str::stream() << "The '" << kIndexesFieldName
- << "' field is a required argument of the "
- << kCommandName
+ << "' field is a required argument of the " << kCommandName
<< " command"};
}
@@ -202,15 +201,13 @@ Status validateTTLOptions(OperationContext* opCtx, const BSONObj& cmdObj) {
str::stream() << "TTL index '" << kExpireAfterSeconds
<< "' option must be numeric, but received a type of '"
<< typeName(expireAfterSecondsElt.type())
- << "'. Index spec: "
- << indexObj};
+ << "'. Index spec: " << indexObj};
}
if (expireAfterSecondsElt.safeNumberLong() < 0) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "TTL index '" << kExpireAfterSeconds
- << "' option cannot be less than 0. Index spec: "
- << indexObj};
+ << "' option cannot be less than 0. Index spec: " << indexObj};
}
const std::string tooLargeErr = str::stream()
@@ -292,8 +289,7 @@ void checkUniqueIndexConstraints(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -392,8 +388,7 @@ Collection* getOrCreateCollection(OperationContext* opCtx,
auto collection = db->createCollection(opCtx, ns, options);
invariant(collection,
str::stream() << "Failed to create collection " << ns.ns()
- << " during index creation: "
- << redact(cmdObj));
+ << " during index creation: " << redact(cmdObj));
wunit.commit();
return collection;
});
@@ -701,9 +696,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// All other errors should be forwarded to the caller with index build information included.
log() << "Index build failed: " << buildUUID << ": " << ex.toStatus();
ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns
- << " ( "
- << *collectionUUID
- << " )");
+ << " ( " << *collectionUUID << " )");
throw;
}
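The validateTTLOptions hunks reflow two checks on the expireAfterSeconds index option: it must be numeric, and it must be non-negative. A reduced sketch of the second check over a plain integer, with illustrative wiring (the real code inspects a BSONElement and includes the index spec in the message):

    #include <optional>
    #include <string>

    std::optional<std::string> checkExpireAfterSeconds(long long secs) {
        if (secs < 0) {
            return std::string(
                "TTL index 'expireAfterSeconds' option cannot be less than 0.");
        }
        return std::nullopt;  // the numeric-type check happens on the element
    }

    int main() {
        return checkExpireAfterSeconds(3600).has_value() ? 1 : 0;
    }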
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 3a4cb290a8f..963ce885d85 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -345,7 +345,7 @@ private:
return false;
}
- auto[prev, next] = getPrevAndNextUUIDs(opCtx, collection);
+ auto [prev, next] = getPrevAndNextUUIDs(opCtx, collection);
// Find and report collection metadata.
auto indices = collectionIndexInfo(opCtx, collection);
@@ -558,4 +558,4 @@ public:
MONGO_REGISTER_TEST_COMMAND(DbCheckCmd);
} // namespace
-}
+} // namespace mongo
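Besides the namespace comment, the dbcheck.cpp hunk adds the space newer clang-format requires in a C++17 structured binding. A self-contained sketch of the construct:

    #include <utility>

    std::pair<int, int> getPrevAndNext() {
        return {1, 3};
    }

    int main() {
        auto [prev, next] = getPrevAndNext();  // decomposes the pair in place
        return next - prev == 2 ? 0 : 1;
    }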
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 2f960430e2a..c4a6f06ac49 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -143,8 +143,8 @@ public:
repl::ReplicationCoordinator::modeNone) &&
(dbname == NamespaceString::kLocalDb)) {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "Cannot drop '" << dbname
- << "' database while replication is active");
+ str::stream()
+ << "Cannot drop '" << dbname << "' database while replication is active");
}
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 5bede2d66f6..f94edb2992f 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -108,7 +108,7 @@ namespace {
/**
* Sets the profiling level, logging/profiling threshold, and logging/profiling sample rate for the
* given database.
-*/
+ */
class CmdProfile : public ProfileCmdBase {
public:
CmdProfile() = default;
@@ -200,8 +200,7 @@ public:
uassert(50847,
str::stream() << "The element that calls binDataClean() must be type of "
"BinData, but type of "
- << typeName(stateElem.type())
- << " found.",
+ << typeName(stateElem.type()) << " found.",
(stateElem.type() == BSONType::BinData));
int len;
@@ -288,8 +287,7 @@ public:
uassert(50849,
str::stream() << "The element that calls binDataClean() must be type "
"of BinData, but type of "
- << owned["data"].type()
- << " found.",
+ << owned["data"].type() << " found.",
owned["data"].type() == BSONType::BinData);
exec->saveState();
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index c150adf60bb..853ba96c51c 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -157,8 +157,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the global
@@ -173,8 +172,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the all_durable timestamp. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
@@ -334,8 +332,7 @@ private:
str::stream() << "Unable to read from a snapshot due to pending collection"
" catalog changes; please retry the operation. Snapshot"
" timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum timestamp is "
+ << mySnapshot->toString() << ". Collection minimum timestamp is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
} else {
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index 6c5723c6de3..fbc542f952a 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -71,9 +71,7 @@ OplogApplicationValidity validateDoTxnCommand(const BSONObj& doTxnObj) {
} catch (...) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "cannot apply a malformed operation in doTxn: "
- << redact(opObj)
- << ": "
- << exceptionToStatus().toString());
+ << redact(opObj) << ": " << exceptionToStatus().toString());
}
};
diff --git a/src/mongo/db/commands/driverHelpers.cpp b/src/mongo/db/commands/driverHelpers.cpp
index 58f73648b4e..3a3ca1b8704 100644
--- a/src/mongo/db/commands/driverHelpers.cpp
+++ b/src/mongo/db/commands/driverHelpers.cpp
@@ -87,4 +87,4 @@ public:
return true;
}
} driverObjectIdTest;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index e8eee785040..34447f9b4cf 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -244,4 +244,4 @@ public:
return true;
}
} cmdReIndex;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 6a8c49f3e20..a29d01cf985 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -153,8 +153,7 @@ std::unique_ptr<CommandInvocation> CmdExplain::parse(OperationContext* opCtx,
if (auto innerDb = explainedObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbname
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbname);
}
auto explainedCommand = CommandHelpers::findCommand(explainedObj.firstElementFieldName());
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index 52ffb278a22..a50cc4ff06d 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -103,4 +103,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(FaultInjectCmd);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index cab903e15d3..ec1d65deb3b 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -215,12 +215,9 @@ void FeatureCompatibilityVersion::updateMinWireVersion() {
void FeatureCompatibilityVersion::_validateVersion(StringData version) {
uassert(40284,
str::stream() << "featureCompatibilityVersion must be '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "'. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << ".",
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "'. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink << ".",
version == FeatureCompatibilityVersionParser::kVersion42 ||
version == FeatureCompatibilityVersionParser::kVersion40);
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
index 6d68b8f417b..919a2aae34c 100644
--- a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
@@ -56,10 +56,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
return {ErrorCodes::TypeMismatch,
str::stream() << "Command argument must be of type "
"String, but was of type "
- << typeName(versionElem.type())
- << " in: "
- << cmdObj
- << ". See "
+ << typeName(versionElem.type()) << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
@@ -73,9 +70,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized field found " << cmdElem.fieldNameStringData()
- << " in "
- << cmdObj
- << ". See "
+ << " in " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -86,14 +81,9 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
version != FeatureCompatibilityVersionParser::kVersion40) {
return {ErrorCodes::BadValue,
str::stream() << "Invalid command argument. Expected '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "', found "
- << version
- << " in: "
- << cmdObj
- << ". See "
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "', found "
+ << version << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_documentation.h b/src/mongo/db/commands/feature_compatibility_version_documentation.h
index 7b51814b2ac..0be6c0b1f39 100644
--- a/src/mongo/db/commands/feature_compatibility_version_documentation.h
+++ b/src/mongo/db/commands/feature_compatibility_version_documentation.h
@@ -34,5 +34,5 @@ namespace feature_compatibility_version_documentation {
constexpr StringData kCompatibilityLink =
"http://dochub.mongodb.org/core/4.0-feature-compatibility"_sd;
constexpr StringData kUpgradeLink = "http://dochub.mongodb.org/core/4.0-upgrade-fcv"_sd;
-}
-}
+} // namespace feature_compatibility_version_documentation
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
index 4a86d174468..0aa872b9041 100644
--- a/src/mongo/db/commands/feature_compatibility_version_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
@@ -61,37 +61,26 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
continue;
} else if (fieldName == kVersionField || fieldName == kTargetVersionField) {
if (elem.type() != BSONType::String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be of type String, but was of type "
- << typeName(elem.type())
- << ". Contents of "
- << kParameterName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << fieldName << " must be of type String, but was of type "
+ << typeName(elem.type()) << ". Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
if (elem.String() != kVersion42 && elem.String() != kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid value for " << fieldName << ", found "
- << elem.String()
- << ", expected '"
- << kVersion42
- << "' or '"
- << kVersion40
- << "'. Contents of "
- << kParameterName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid value for " << fieldName << ", found "
+ << elem.String() << ", expected '" << kVersion42 << "' or '"
+ << kVersion40 << "'. Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -102,15 +91,12 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
targetVersionString = elem.String();
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Unrecognized field '" << fieldName << "'. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Unrecognized field '" << fieldName << "'. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -126,28 +112,23 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
}
} else if (versionString == kVersion42) {
if (targetVersionString == kVersion42 || targetVersionString == kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid state for " << kParameterName << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid state for " << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
} else {
version = ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42;
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Missing required field '" << kVersionField << "''. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Missing required field '" << kVersionField << "''. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
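The parser hunks above reflow, without changing, the validation that featureCompatibilityVersion must be one of two string values. A reduced sketch of that check, assuming only the standard library (validateVersion is an illustrative name; the version strings follow the diff):

    #include <optional>
    #include <string>

    std::optional<std::string> validateVersion(const std::string& version) {
        if (version != "4.2" && version != "4.0") {
            return "Invalid value for version, found " + version +
                ", expected '4.2' or '4.0'.";
        }
        return std::nullopt;  // OK
    }

    int main() {
        return validateVersion("4.2").has_value() ? 1 : 0;
    }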
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 533268011ed..e9970426ba7 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -217,8 +217,8 @@ public:
} catch (DBException& error) {
if (error.code() == ErrorCodes::InvalidPipelineOperator) {
uasserted(ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Unsupported in view pipeline: "
- << error.what());
+ str::stream()
+ << "Unsupported in view pipeline: " << error.what());
}
throw;
}
@@ -330,8 +330,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime->toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the
@@ -347,8 +346,7 @@ public:
" than the all_durable timestamp. Requested"
" clusterTime: "
<< targetClusterTime->toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 37b0bae2cf8..5d9bda46bae 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -64,7 +64,7 @@ namespace {
// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex',
// lock time will be reported for a given user operation.
Lock::ResourceMutex commandMutex("fsyncCommandMutex");
-}
+} // namespace
/**
* Maintains a global read lock while mongod is fsyncLocked.
@@ -437,4 +437,4 @@ MONGO_INITIALIZER(fsyncLockedForWriting)(InitializerContext* context) {
setLockedForWritingImpl([]() { return fsyncCmd.fsyncLocked(); });
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/fsync_locked.h b/src/mongo/db/commands/fsync_locked.h
index a84c03209de..bf530eb643e 100644
--- a/src/mongo/db/commands/fsync_locked.h
+++ b/src/mongo/db/commands/fsync_locked.h
@@ -33,14 +33,14 @@
namespace mongo {
/**
-* Returns true if mongod is currently fsyncLocked.
-*/
+ * Returns true if mongod is currently fsyncLocked.
+ */
bool lockedForWriting();
/**
-* Sets the implementation for lockedForWriting(). Should be done once during startup in a
-* MONGO_INITIALIZER.
-*/
+ * Sets the implementation for lockedForWriting(). Should be done once during startup in a
+ * MONGO_INITIALIZER.
+ */
void setLockedForWritingImpl(stdx::function<bool()> impl);
} // namespace mongo
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index 39f1b7cd225..c39c95b4cfd 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -240,9 +240,7 @@ public:
if (val.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false)
- << " of type "
- << typeName(val.type()));
+ << val.toString(false) << " of type " << typeName(val.type()));
}
string p = val.String();
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index b4977da6cf3..8e3530d7169 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -87,17 +87,14 @@ void validateLSID(OperationContext* opCtx, const GetMoreRequest& request, Client
uassert(50737,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
+ << ", which was created in session " << *cursor->getSessionId()
<< ", without an lsid",
opCtx->getLogicalSessionId() || !cursor->getSessionId());
uassert(50738,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
- << ", in session "
- << *opCtx->getLogicalSessionId(),
+ << ", which was created in session " << *cursor->getSessionId()
+ << ", in session " << *opCtx->getLogicalSessionId(),
!opCtx->getLogicalSessionId() || !cursor->getSessionId() ||
(opCtx->getLogicalSessionId() == cursor->getSessionId()));
}
@@ -117,17 +114,14 @@ void validateTxnNumber(OperationContext* opCtx,
uassert(50740,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber",
opCtx->getTxnNumber() || !cursor->getTxnNumber());
uassert(50741,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber(),
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber(),
!opCtx->getTxnNumber() || !cursor->getTxnNumber() ||
(*opCtx->getTxnNumber() == *cursor->getTxnNumber()));
}
@@ -438,8 +432,8 @@ public:
// Ensure that the client still has the privileges to run the originating command.
if (!authzSession->isAuthorizedForPrivileges(cursorPin->getOriginatingPrivileges())) {
uasserted(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for getMore with cursor id "
- << _request.cursorid);
+ str::stream()
+ << "not authorized for getMore with cursor id " << _request.cursorid);
}
if (_request.nss != cursorPin->nss()) {
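The validateLSID and validateTxnNumber hunks reflow two symmetric checks: a getMore may omit a session (or transaction) only if the cursor was created without one, and otherwise the ids must match exactly. A reduced sketch with std::optional<int> standing in for the session id:

    #include <optional>

    bool lsidCheckPasses(std::optional<int> opSession,
                         std::optional<int> cursorSession) {
        if (!opSession && cursorSession)
            return false;  // cursor has a session, the operation does not
        if (opSession && cursorSession && *opSession != *cursorSession)
            return false;  // both present but mismatched
        return true;
    }

    int main() {
        return lsidCheckPasses(std::nullopt, std::nullopt) ? 0 : 1;
    }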
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 2d69dcb6e9f..04b3c7f87ed 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -101,4 +101,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdHashElt);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index f7be9462caa..de26b32142b 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -110,8 +110,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
: BasicCommand(name), helpText(helpText) {}
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 52f4c49f2f2..2c34bb715c7 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -188,4 +188,4 @@ public:
return true;
}
} cmdListDatabases;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index ae25d883d19..6f119e2baf9 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -127,8 +127,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << nss.ns());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << nss.ns());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp
index d3250df05b1..bf52720e65e 100644
--- a/src/mongo/db/commands/lock_info.cpp
+++ b/src/mongo/db/commands/lock_info.cpp
@@ -105,4 +105,4 @@ public:
return true;
}
} cmdLockInfo;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 5290d324a2d..0e6d4baa02b 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -658,9 +658,7 @@ void State::appendResults(BSONObjBuilder& final) {
BSONObj idKey = BSON("_id" << 1);
if (!_db.runCommand("admin",
BSON("splitVector" << _config.outputOptions.finalNamespace.ns()
- << "keyPattern"
- << idKey
- << "maxChunkSizeBytes"
+ << "keyPattern" << idKey << "maxChunkSizeBytes"
<< _config.splitInfo),
res)) {
uasserted(15921, str::stream() << "splitVector failed: " << res);
@@ -748,8 +746,7 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
if (!_db.runCommand("admin",
BSON("renameCollection" << _config.tempNamespace.ns() << "to"
<< _config.outputOptions.finalNamespace.ns()
- << "stayTemp"
- << _config.shardedFirstPass),
+ << "stayTemp" << _config.shardedFirstPass),
info)) {
uasserted(10076, str::stream() << "rename failed: " << info);
}
@@ -833,9 +830,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
uassert(
ErrorCodes::PrimarySteppedDown,
str::stream() << "no longer primary while inserting mapReduce result into collection: "
- << nss
- << ": "
- << redact(o),
+ << nss << ": " << redact(o),
repl::ReplicationCoordinator::get(_opCtx)->canAcceptWritesFor(_opCtx, nss));
assertCollectionNotNull(nss, autoColl);
@@ -882,10 +877,8 @@ void State::_insertToInc(BSONObj& o) {
if (o.objsize() > BSONObjMaxUserSize) {
uasserted(ErrorCodes::BadValue,
str::stream() << "object to insert too large for incremental collection"
- << ". size in bytes: "
- << o.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << o.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
}
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
@@ -934,8 +927,9 @@ State::~State() {
_useIncremental ? _config.incLong : NamespaceString());
} catch (...) {
error() << "Unable to drop temporary collection created by mapReduce: "
- << _config.tempNamespace << ". This collection will be removed automatically "
- "the next time the server starts up. "
+ << _config.tempNamespace
+ << ". This collection will be removed automatically "
+ "the next time the server starts up. "
<< exceptionToStatus();
}
}
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index d9ced2102af..aab8f011d09 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -158,7 +158,7 @@ private:
* result in "__returnValue"
* @param key OUT
* @param endSizeEstimate OUT
- */
+ */
void _reduce(const BSONList& values, BSONObj& key, int& endSizeEstimate);
JSFunction _func;
@@ -281,13 +281,13 @@ public:
void emit(const BSONObj& a);
/**
- * Checks the size of the transient in-memory results accumulated so far and potentially
- * runs reduce in order to compact them. If the data is still too large, it will be
- * spilled to the output collection.
- *
- * NOTE: Make sure that no DB locks are held, when calling this function, because it may
- * try to acquire write DB lock for the write to the output collection.
- */
+ * Checks the size of the transient in-memory results accumulated so far and potentially
+ * runs reduce in order to compact them. If the data is still too large, it will be
+ * spilled to the output collection.
+ *
+ * NOTE: Make sure that no DB locks are held, when calling this function, because it may
+ * try to acquire write DB lock for the write to the output collection.
+ */
void reduceAndSpillInMemoryStateIfNeeded();
/**
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index b3c973adebd..630379438ba 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -157,5 +157,5 @@ bool mrSupportsWriteConcern(const BSONObj& cmd) {
return true;
}
}
-}
-}
+} // namespace mr
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 615cd5f3c9f..4a6d428dc66 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -74,11 +74,7 @@ void _compareOutputOptionField(const std::string& dbname,
if (actual == expected)
return;
FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName
- << ": Expected: "
- << expected
- << ". Actual: "
- << actual);
+ << fieldName << ": Expected: " << expected << ". Actual: " << actual);
}
/**
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index b68630938dc..6e1366815a3 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -306,8 +306,8 @@ public:
// Make sure we are allowed to change this parameter
if (!foundParameter->second->allowedToChangeAtRuntime()) {
- errmsg = str::stream() << "not allowed to change [" << parameterName
- << "] at runtime";
+ errmsg = str::stream()
+ << "not allowed to change [" << parameterName << "] at runtime";
return false;
}
@@ -365,9 +365,8 @@ public:
log() << "successfully set parameter " << parameterName << " to "
<< redact(parameter.toString(false))
- << (oldValue ? std::string(str::stream() << " (was "
- << redact(oldValue.toString(false))
- << ")")
+ << (oldValue ? std::string(str::stream()
+ << " (was " << redact(oldValue.toString(false)) << ")")
: "");
numSet++;
@@ -422,8 +421,8 @@ void LogComponentVerbosityServerParameter::append(OperationContext*,
Status LogComponentVerbosityServerParameter::set(const BSONElement& newValueElement) {
if (!newValueElement.isABSONObj()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "log component verbosity is not a BSON object: "
- << newValueElement);
+ str::stream()
+ << "log component verbosity is not a BSON object: " << newValueElement);
}
return setLogComponentVerbosity(newValueElement.Obj());
}
@@ -456,9 +455,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
if (str.size() > kMaxSize)
return {ErrorCodes::Overflow,
str::stream() << "Value for parameter automationServiceDescriptor"
- << " must be no more than "
- << kMaxSize
- << " bytes"};
+ << " must be no more than " << kMaxSize << " bytes"};
{
const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index e112b19b9fc..68c04442192 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -104,8 +104,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
PlanCacheCommand::PlanCacheCommand(const string& name,
const string& helpText,
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 1632207d392..b118407ef65 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -341,12 +341,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON(
- "query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort() << "projection"
- << cqA->getQueryRequest().getProj());
- BSONObj shapeB = BSON(
- "query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort() << "projection"
- << cqB->getQueryRequest().getProj());
+ BSONObj shapeA =
+ BSON("query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort()
+ << "projection" << cqA->getQueryRequest().getProj());
+ BSONObj shapeB =
+ BSON("query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort()
+ << "projection" << cqB->getQueryRequest().getProj());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeA](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
@@ -413,14 +413,11 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
BSONObj shape = BSON("query" << cq->getQueryObj() << "sort" << cq->getQueryRequest().getSort()
- << "projection"
- << cq->getQueryRequest().getProj());
- BSONObj shapeWithCollation = BSON("query" << cqCollation->getQueryObj() << "sort"
- << cqCollation->getQueryRequest().getSort()
- << "projection"
- << cqCollation->getQueryRequest().getProj()
- << "collation"
- << cqCollation->getCollator()->getSpec().toBSON());
+ << "projection" << cq->getQueryRequest().getProj());
+ BSONObj shapeWithCollation = BSON(
+ "query" << cqCollation->getQueryObj() << "sort" << cqCollation->getQueryRequest().getSort()
+ << "projection" << cqCollation->getQueryRequest().getProj() << "collation"
+ << cqCollation->getCollator()->getSpec().toBSON());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shape](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 9618543a0a9..3fd5a94da1c 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -115,4 +115,4 @@ public:
return true;
}
} repairCursorCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 4dccbfef572..c62ebad77c4 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -299,8 +299,8 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
if (!resolvedView.isOK()) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
- << resolvedView.getStatus().toString()};
+ str::stream() << "Failed to resolve view '" << involvedNs.ns()
+ << "': " << resolvedView.getStatus().toString()};
}
resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp
index 738b22e8945..d5776746dd1 100644
--- a/src/mongo/db/commands/server_status_internal.cpp
+++ b/src/mongo/db/commands/server_status_internal.cpp
@@ -83,4 +83,4 @@ void MetricTree::appendTo(BSONObjBuilder& b) const {
bb.done();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_internal.h b/src/mongo/db/commands/server_status_internal.h
index cbd67fa0056..f9bde775db3 100644
--- a/src/mongo/db/commands/server_status_internal.h
+++ b/src/mongo/db/commands/server_status_internal.h
@@ -52,4 +52,4 @@ private:
std::map<std::string, MetricTree*> _subtrees;
std::map<std::string, ServerStatusMetric*> _metrics;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp
index d56a2970ec6..264844c02ad 100644
--- a/src/mongo/db/commands/server_status_metric.cpp
+++ b/src/mongo/db/commands/server_status_metric.cpp
@@ -49,4 +49,4 @@ string ServerStatusMetric::_parseLeafName(const string& name) {
return name.substr(idx + 1);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h
index 01c695ff046..f64327908e7 100644
--- a/src/mongo/db/commands/server_status_metric.h
+++ b/src/mongo/db/commands/server_status_metric.h
@@ -88,4 +88,4 @@ public:
private:
const T* _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp
index 73f10c97422..6315265de72 100644
--- a/src/mongo/db/commands/sleep_command.cpp
+++ b/src/mongo/db/commands/sleep_command.cpp
@@ -153,4 +153,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(CmdSleep);
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 3485f623c7d..01b3d7b8c74 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -128,4 +128,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdSetCommittedSnapshot);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 1410bb5a3bc..d981b24ad1d 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -209,4 +209,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(EmptyCapped);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 623cffb0367..a155d443692 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -101,8 +101,7 @@ BSONArray roleSetToBSONArray(const stdx::unordered_set<RoleName>& roles) {
++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -113,8 +112,7 @@ BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -174,14 +172,14 @@ Status checkOkayToGrantRolesToRole(OperationContext* opCtx,
const RoleName& roleToAdd = *it;
if (roleToAdd == role) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant role " << role.getFullName()
- << " to itself.");
+ str::stream()
+ << "Cannot grant role " << role.getFullName() << " to itself.");
}
if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(
- ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB()
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream()
+ << "Roles on the \'" << role.getDB()
<< "\' database cannot be granted roles from other databases");
}
@@ -431,14 +429,13 @@ Status insertRoleDocument(OperationContext* opCtx, const BSONObj& roleObj) {
* Updates the given role object with the given update modifier.
*/
Status updateRoleDocument(OperationContext* opCtx, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(opCtx,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(
+ opCtx,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -516,13 +513,12 @@ Status updatePrivilegeDocument(OperationContext* opCtx,
Status updatePrivilegeDocument(OperationContext* opCtx,
const UserName& user,
const BSONObj& updateObj) {
- const auto status = updatePrivilegeDocument(opCtx,
- user,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << user.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << user.getDB()),
- updateObj);
+ const auto status = updatePrivilegeDocument(
+ opCtx,
+ user,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
+ updateObj);
return status;
}
@@ -621,8 +617,7 @@ StatusWith<AuthzLockGuard> requireWritableAuthSchema28SCRAM(OperationContext* op
str::stream()
<< "User and role management commands require auth data to have "
<< "at least schema version "
- << AuthorizationManager::schemaVersion28SCRAM
- << " but found "
+ << AuthorizationManager::schemaVersion28SCRAM << " but found "
<< foundSchemaVersion);
}
status = writeAuthSchemaVersionIfNeeded(opCtx, authzManager, foundSchemaVersion);
@@ -658,8 +653,7 @@ StatusWith<AuthzLockGuard> requireReadableAuthSchema26Upgrade(OperationContext*
return Status(ErrorCodes::AuthSchemaIncompatible,
str::stream() << "The usersInfo and rolesInfo commands require auth data to "
<< "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade
- << " but found "
+ << AuthorizationManager::schemaVersion26Upgrade << " but found "
<< foundSchemaVersion);
}
@@ -2022,9 +2016,9 @@ public:
&nMatched);
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
- .withContext(str::stream() << "Failed to remove role "
- << roleName.getFullName()
- << " from all users"));
+ .withContext(str::stream()
+ << "Failed to remove role " << roleName.getFullName()
+ << " from all users"));
}
// Remove this role from all other roles
@@ -2045,9 +2039,9 @@ public:
if (!status.isOK()) {
uassertStatusOK(
useDefaultCode(status, ErrorCodes::RoleModificationFailed)
- .withContext(
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users but failed to remove from all roles"));
+ .withContext(str::stream()
+ << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles"));
}
audit::logDropRole(Client::getCurrent(), roleName);
@@ -2139,13 +2133,12 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all users"));
+ << dbname << "\" db from all users"));
}
// Remove these roles from all other roles
- std::string sourceFieldName = str::stream() << "roles."
- << AuthorizationManager::ROLE_DB_FIELD_NAME;
+ std::string sourceFieldName = str::stream()
+ << "roles." << AuthorizationManager::ROLE_DB_FIELD_NAME;
status = updateAuthzDocuments(
opCtx,
AuthorizationManager::rolesCollectionNamespace,
@@ -2158,8 +2151,7 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::RoleModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all roles"));
+ << dbname << "\" db from all roles"));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2580,9 +2572,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
@@ -2653,9 +2643,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
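The hunks above repeatedly reflow error strings built with str::stream(), a temporary stream that collects << pieces and yields a std::string in one expression. A minimal standalone sketch of that builder pattern, assuming only the standard library (the class name and conversion operator are illustrative, not mongo's actual str::stream implementation):

#include <sstream>
#include <string>

// Illustrative stand-in for the stream-to-string builder reformatted above.
class StreamBuilder {
public:
    template <typename T>
    StreamBuilder& operator<<(const T& v) {
        _os << v;  // forward each piece to the underlying stream
        return *this;
    }
    operator std::string() const {
        return _os.str();  // materialize the accumulated message
    }

private:
    std::ostringstream _os;
};

int main() {
    // Mirrors the message-building shape in the diff; the role name is made up.
    std::string msg = StreamBuilder() << "Not authorized to grant role: " << "readWrite";
    return msg.empty() ? 1 : 0;
}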
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 7abc55ab60a..08e4e5345c1 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -58,8 +58,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to grant role: " << roles[i].getFullName());
}
}
@@ -83,8 +83,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to revoke role: " << roles[i].getFullName());
}
}
return Status::OK();
@@ -129,8 +129,8 @@ Status checkAuthForCreateUserCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: "
- << args.userName.getDB());
+ str::stream()
+ << "Not authorized to create users on db: " << args.userName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -231,8 +231,8 @@ Status checkAuthForCreateRoleCommand(Client* client,
if (!authzSession->isAuthorizedToCreateRole(args)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create roles on db: "
- << args.roleName.getDB());
+ str::stream()
+ << "Not authorized to create roles on db: " << args.roleName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -365,8 +365,8 @@ Status checkAuthForDropAllUsersFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop users from the " << dbname << " database");
}
return Status::OK();
}
@@ -415,8 +415,8 @@ Status checkAuthForUsersInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view users from the " << dbname << " database");
}
} else if (args.target == auth::UsersInfoArgs::Target::kGlobal) {
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
@@ -462,8 +462,8 @@ Status checkAuthForDropAllRolesFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop roles from the " << dbname << " database");
}
return Status::OK();
}
@@ -482,8 +482,8 @@ Status checkAuthForRolesInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view roles from the " << dbname << " database");
}
} else {
for (size_t i = 0; i < args.roleNames.size(); ++i) {
@@ -496,8 +496,7 @@ Status checkAuthForRolesInfoCommand(Client* client,
ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB()
- << " database");
+ << args.roleNames[i].getDB() << " database");
}
}
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 2daeaf3372e..71c4ba0852f 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -210,4 +210,4 @@ public:
}
} validateCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 1c63ac0756b..c427ff42598 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -111,7 +111,7 @@ void serializeReply(OperationContext* opCtx,
BSONSizeTracker upsertInfoSizeTracker;
BSONSizeTracker errorsSizeTracker;
- auto errorMessage = [&, errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [&, errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
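For orientation, the lambda whose capture list is reformatted above accumulates error text and, per its own comment, starts truncating messages once both floors are exceeded. A hedged standalone sketch of that policy, with the two limits copied from the hunk and the 256-byte clip length as an assumption:

#include <cstddef>
#include <string>

int main() {
    constexpr std::size_t kErrorSizeTruncationMin = 1024 * 1024;
    constexpr std::size_t kErrorCountTruncationMin = 2;
    std::size_t errorSize = 0;   // bytes of error text seen so far
    std::size_t errorCount = 0;  // number of errors seen so far
    auto errorMessage = [&](const std::string& rawMessage) {
        errorSize += rawMessage.size();
        ++errorCount;
        const bool truncate =
            errorSize > kErrorSizeTruncationMin && errorCount > kErrorCountTruncationMin;
        return truncate ? rawMessage.substr(0, 256) : rawMessage;  // clip length is a guess
    };
    std::string out = errorMessage("E11000 duplicate key error");
    return out.empty() ? 1 : 0;
}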
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index c375ffdd73f..95c6771badf 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -52,8 +52,8 @@ public:
void makeKClientsWithLockers(int k) {
clients.reserve(k);
for (int i = 0; i < k; ++i) {
- auto client = getGlobalServiceContext()->makeClient(
- str::stream() << "test client for thread " << i);
+ auto client = getGlobalServiceContext()->makeClient(str::stream()
+ << "test client for thread " << i);
auto opCtx = client->makeOperationContext();
opCtx->swapLockState(std::make_unique<LockerImpl>());
clients.emplace_back(std::move(client), std::move(opCtx));
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 26fab1084cf..bf5e0f224b3 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -2094,7 +2094,7 @@ public:
bool activeTransaction = true;
};
-}
+} // namespace
TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonsSnapshotWhenNotInWriteUnitOfWork) {
auto clients = makeKClientsWithLockers(1);
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index e3c7fc77809..5dcbfa07f28 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -102,7 +102,10 @@ uint32_t modeMask(LockMode mode) {
* Maps the LockRequest status to a human-readable string.
*/
static const char* LockRequestStatusNames[] = {
- "new", "granted", "waiting", "converting",
+ "new",
+ "granted",
+ "waiting",
+ "converting",
};
// Ensure we do not add new status types without updating the names array
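The hunk above pairs a human-readable name table with a guard that the enum and the array stay in sync. A self-contained sketch of that idiom, with stand-in enum values rather than mongo's LockRequest status type:

#include <cstddef>

// kCount is a sentinel so the static_assert can count the enumerators.
enum class Status { kNew, kGranted, kWaiting, kConverting, kCount };

static const char* kStatusNames[] = {
    "new",
    "granted",
    "waiting",
    "converting",
};

static_assert(sizeof(kStatusNames) / sizeof(kStatusNames[0]) ==
                  static_cast<std::size_t>(Status::kCount),
              "update kStatusNames when adding a Status");

int main() {}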
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index ab113b48aad..50b2116d953 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -60,32 +60,32 @@ public:
~LockManager();
/**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Locking the same resource twice increments the reference count of the lock so each call
- * to lock must be matched with a call to unlock with the same resource.
- *
- * @param resId Id of the resource to be locked.
- * @param request LockRequest structure on which the state of the request will be tracked.
- * This value cannot be NULL and the notify value must be set. If the
- * return value is not LOCK_WAITING, this pointer can be freed and will
- * not be used any more.
- *
- * If the return value is LOCK_WAITING, the notification method will be called
- * at some point into the future, when the lock becomes granted. If unlock is
- * called before the lock becomes granted, the notification will not be
- * invoked.
- *
- * If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notify method has been invoked or unlock has
- * been called for the resource it was assigned to. Failure to do so will
- * cause the lock manager to call into an invalid memory location.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- *
- * @return See comments for LockResult.
- */
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Locking the same resource twice increments the reference count of the lock so each call
+ * to lock must be matched with a call to unlock with the same resource.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param request LockRequest structure on which the state of the request will be tracked.
+ * This value cannot be NULL and the notify value must be set. If the
+ * return value is not LOCK_WAITING, this pointer can be freed and will
+ * not be used any more.
+ *
+ * If the return value is LOCK_WAITING, the notification method will be called
+ * at some point into the future, when the lock becomes granted. If unlock is
+ * called before the lock becomes granted, the notification will not be
+ * invoked.
+ *
+ * If the return value is LOCK_WAITING, the notification object *must*
+ * live at least until the notify method has been invoked or unlock has
+ * been called for the resource it was assigned to. Failure to do so will
+ * cause the lock manager to call into an invalid memory location.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ *
+ * @return See comments for LockResult.
+ */
LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
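The comment block above documents a reference-counting contract: locking the same resource twice means two matching unlocks. A toy model of that pairing rule, assuming nothing about mongo's real LockManager beyond what the comment states:

#include <cassert>
#include <map>

struct ToyLockManager {
    std::map<int, int> refCounts;  // resourceId -> outstanding lock() calls

    void lock(int resId) {
        ++refCounts[resId];  // re-entrant acquisition bumps the count
    }
    bool unlock(int resId) {
        auto it = refCounts.find(resId);
        assert(it != refCounts.end() && "unlock without matching lock");
        if (--it->second == 0) {
            refCounts.erase(it);
            return true;  // fully released
        }
        return false;  // still held by an earlier lock()
    }
};

int main() {
    ToyLockManager lm;
    lm.lock(42);
    lm.lock(42);  // second acquisition: refcount is now 2
    bool released = lm.unlock(42);
    assert(!released);  // still held
    released = lm.unlock(42);
    assert(released);  // now matched and released
    return 0;
}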
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 40e14bff3ac..b08e3cc958a 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -328,8 +328,7 @@ void LockerImpl::reacquireTicket(OperationContext* opCtx) {
} else {
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
- << "' within a max lock request timeout of '"
- << *_maxLockTimeout
+ << "' within a max lock request timeout of '" << *_maxLockTimeout
<< "' milliseconds.",
_acquireTicket(opCtx, _modeForTicket, Date_t::now() + *_maxLockTimeout));
}
@@ -369,8 +368,7 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
<< "' within a max lock request timeout of '"
- << Date_t::now() - beforeAcquire
- << "' milliseconds.",
+ << Date_t::now() - beforeAcquire << "' milliseconds.",
_acquireTicket(opCtx, mode, deadline));
}
_modeForTicket = mode;
@@ -965,8 +963,7 @@ void LockerImpl::lockComplete(OperationContext* opCtx,
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire lock '" << resId.toString() << "' within "
- << timeout
- << "' milliseconds.",
+ << timeout << "' milliseconds.",
waitTime > Milliseconds(0));
}
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 76f51cf9a13..00297be4d37 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -939,11 +939,12 @@ namespace {
bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
const ResourceId& resourceId,
const LockMode& mode) {
- return (1U == std::count_if(lockerInfo.locks.begin(),
- lockerInfo.locks.end(),
- [&resourceId, &mode](const Locker::OneLock& lock) {
- return lock.resourceId == resourceId && lock.mode == mode;
- }));
+ return (1U ==
+ std::count_if(lockerInfo.locks.begin(),
+ lockerInfo.locks.end(),
+ [&resourceId, &mode](const Locker::OneLock& lock) {
+ return lock.resourceId == resourceId && lock.mode == mode;
+ }));
}
} // namespace
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index c36b382b584..9eb18f8d349 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -48,10 +48,10 @@ WriteConflictException::WriteConflictException()
}
void WriteConflictException::logAndBackoff(int attempt, StringData operation, StringData ns) {
- mongo::logAndBackoff(
- ::mongo::logger::LogComponent::kWrite,
- logger::LogSeverity::Debug(1),
- static_cast<size_t>(attempt),
- str::stream() << "Caught WriteConflictException doing " << operation << " on " << ns);
-}
+ mongo::logAndBackoff(::mongo::logger::LogComponent::kWrite,
+ logger::LogSeverity::Debug(1),
+ static_cast<size_t>(attempt),
+ str::stream() << "Caught WriteConflictException doing " << operation
+ << " on " << ns);
}
+} // namespace mongo
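logAndBackoff above logs the attempt number and sleeps before the caller retries. A standalone sketch of that retry-with-backoff shape (this is not mongo's writeConflictRetry helper; the exception type, attempt cap, and delays are assumptions):

#include <chrono>
#include <iostream>
#include <stdexcept>
#include <thread>

struct WriteConflict : std::runtime_error {
    WriteConflict() : std::runtime_error("write conflict") {}
};

template <typename Op>
void retryOnWriteConflict(Op op, int maxAttempts = 5) {
    for (int attempt = 0;; ++attempt) {
        try {
            op();
            return;  // success
        } catch (const WriteConflict&) {
            if (attempt + 1 >= maxAttempts)
                throw;  // give up after the cap
            std::cerr << "Caught WriteConflictException, attempt " << attempt << '\n';
            // Back off a little longer on each failed attempt.
            std::this_thread::sleep_for(std::chrono::milliseconds(1 << attempt));
        }
    }
}

int main() {
    int failures = 2;  // simulate two conflicts before success
    retryOnWriteConflict([&] {
        if (failures-- > 0)
            throw WriteConflict();
    });
    return 0;
}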
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 893b67cdde8..551bed75d54 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -65,7 +65,14 @@ namespace {
// OP_QUERY find. The $orderby field is omitted because "orderby" (no dollar sign) is also allowed,
// and this requires special handling.
const std::vector<const char*> kDollarQueryModifiers = {
- "$hint", "$comment", "$max", "$min", "$returnKey", "$showDiskLoc", "$snapshot", "$maxTimeMS",
+ "$hint",
+ "$comment",
+ "$max",
+ "$min",
+ "$returnKey",
+ "$showDiskLoc",
+ "$snapshot",
+ "$maxTimeMS",
};
} // namespace
diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp
index b5f9b9e9a36..6afbfb05be5 100644
--- a/src/mongo/db/curop_failpoint_helpers.cpp
+++ b/src/mongo/db/curop_failpoint_helpers.cpp
@@ -85,4 +85,4 @@ void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint,
updateCurOpMsg(opCtx, origCurOpMsg);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/curop_failpoint_helpers.h b/src/mongo/db/curop_failpoint_helpers.h
index e642f601811..a1143805951 100644
--- a/src/mongo/db/curop_failpoint_helpers.h
+++ b/src/mongo/db/curop_failpoint_helpers.h
@@ -64,4 +64,4 @@ public:
bool checkForInterrupt = false,
boost::optional<NamespaceString> nss = boost::none);
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 55c15c26593..e0b32da95f0 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -1007,8 +1007,8 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
if (auto svcExec = serviceContext->getServiceExecutor()) {
Status status = svcExec->shutdown(Seconds(10));
if (!status.isOK()) {
- log(LogComponent::kNetwork) << "Service executor failed to shutdown within timelimit: "
- << status.reason();
+ log(LogComponent::kNetwork)
+ << "Service executor failed to shutdown within timelimit: " << status.reason();
}
}
#endif
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index fae5952f834..079a1f32f3f 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -122,8 +122,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString());
}
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 0b0671b4ecf..d5ce4367612 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 5a4abebd032..db2b2f8e0bd 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -59,9 +59,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::string;
+using std::unique_ptr;
/* fetch a single object from collection ns that matches query
set your db SavedContext first
diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
index f893b53eee9..114dc00d80c 100644
--- a/src/mongo/db/dbmessage.cpp
+++ b/src/mongo/db/dbmessage.cpp
@@ -153,7 +153,7 @@ Message makeMessage(NetworkOp op, Func&& bodyBuilder) {
out.header().setLen(size);
return out;
}
-}
+} // namespace
Message makeInsertMessage(StringData ns, const BSONObj* objs, size_t count, int flags) {
return makeMessage(dbInsert, [&](BufBuilder& b) {
@@ -238,4 +238,4 @@ DbResponse replyToQuery(int queryResultFlags,
reply.bufBuilderForResults().appendBuf(data, size);
return DbResponse{reply.toQueryReply(queryResultFlags, nReturned, startingFrom, cursorId)};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 69fdfd7375a..78815dbdd42 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -96,7 +96,7 @@ class OperationContext;
namespace QueryResult {
#pragma pack(1)
/* see http://dochub.mongodb.org/core/mongowireprotocol
-*/
+ */
struct Layout {
MsgData::Layout msgdata;
int64_t cursorId;
@@ -298,7 +298,7 @@ enum QueryOptions {
QueryOption_CursorTailable = 1 << 1,
/** allow query of replica slave. normally these return an error except for namespace "local".
- */
+ */
QueryOption_SlaveOk = 1 << 2,
// findingStart mode is used to find the first operation of interest when
@@ -319,7 +319,7 @@ enum QueryOptions {
/** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while
* rather than returning no data. After a timeout period, we do return as normal.
- */
+ */
QueryOption_AwaitData = 1 << 5,
/** Stream the data down full blast in multiple "more" packages, on the assumption that the
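QueryOptions above is a classic bit-flag enum: each option occupies one bit, so options combine with | and test with &. A small sketch using the three flag values visible in the hunk:

#include <cassert>
#include <cstdint>

enum QueryOptions : std::uint32_t {
    QueryOption_CursorTailable = 1 << 1,
    QueryOption_SlaveOk = 1 << 2,
    QueryOption_AwaitData = 1 << 5,
};

int main() {
    // Request a tailable cursor that blocks briefly at the end of data.
    std::uint32_t opts = QueryOption_CursorTailable | QueryOption_AwaitData;
    assert(opts & QueryOption_CursorTailable);
    assert(!(opts & QueryOption_SlaveOk));  // slave reads were not requested
    return 0;
}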
diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp
index b804e277407..73354253738 100644
--- a/src/mongo/db/dbmessage_test.cpp
+++ b/src/mongo/db/dbmessage_test.cpp
@@ -140,4 +140,4 @@ TEST(DBMessage1, GoodInsert2) {
}
-} // mongo namespace
+} // namespace mongo
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index cdb095ba08d..7904915ebe7 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -37,8 +37,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
using std::vector;
using stdx::make_unique;
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index 7750beeaf86..0de28de0b55 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -94,8 +94,7 @@ BSONObj ChangeStreamProxyStage::_validateAndConvertToBSON(const Document& event)
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
return eventBSON;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index fb77bc01965..0b3f9b1c23c 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -118,8 +118,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
Status status(ErrorCodes::CappedPositionLost,
str::stream() << "CollectionScan died due to failure to restore "
<< "tailable cursor position. "
- << "Last seen record id: "
- << _lastSeenId);
+ << "Last seen record id: " << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::FAILURE;
}
@@ -222,8 +221,7 @@ void CollectionScan::doRestoreStateRequiresCollection() {
uassert(ErrorCodes::CappedPositionLost,
str::stream()
<< "CollectionScan died due to position in capped collection being deleted. "
- << "Last seen record id: "
- << _lastSeenId,
+ << "Last seen record id: " << _lastSeenId,
couldRestore);
}
}
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index ce19790ef19..66fabdc964c 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -61,7 +61,7 @@ BSONObj replaceBSONFieldNames(const BSONObj& replace, const BSONObj& fieldNames)
return bob.obj();
}
-}
+} // namespace
using std::unique_ptr;
using std::vector;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 72ffe359813..32c9294cdce 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -96,7 +96,7 @@ struct StoredGeometry {
BSONElement element;
GeometryContainer geometry;
};
-}
+} // namespace
/**
* Find and parse all geometry elements on the appropriate field path from the document.
@@ -556,7 +556,7 @@ private:
// Owns matcher
const unique_ptr<MatchExpression> _matcher;
};
-}
+} // namespace
static double min2DBoundsIncrement(const GeoNearExpression& query,
const IndexDescriptor* twoDIndex) {
@@ -591,9 +591,9 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(NULL);
@@ -830,7 +830,7 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) {
// Takes ownership of caps
return new S2RegionIntersection(&regions);
}
-}
+} // namespace
// Estimate the density of data by search the nearest cells level by level around center.
class GeoNear2DSphereStage::DensityEstimator {
@@ -1011,9 +1011,9 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(NULL);
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 257423679a7..d441954f8f6 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -124,4 +124,4 @@ TEST_F(QueuedDataStageTest, validateStats) {
unique_ptr<PlanStageStats> allStats(mock->getStats());
ASSERT_TRUE(stats->isEOF);
}
-}
+} // namespace
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index ab601569cd4..973165969be 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -75,4 +75,4 @@ private:
CountStats _specificStats;
};
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 3d77b61870a..060722dbe14 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -61,8 +61,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// a rename has happened during yield.
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection renamed from '" << _nss << "' to '" << *newNss
- << "'. UUID "
- << _collectionUUID,
+ << "'. UUID " << _collectionUUID,
*newNss == _nss);
// At this point we know that the collection name has not changed, and therefore we have
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index c313f3b592d..687abb12964 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -65,8 +65,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
using stdx::make_unique;
@@ -283,11 +283,9 @@ public:
str::stream() << "Can't find index: " << keyPatternObj,
!indexes.empty());
uassert(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " matching indexes for key pattern: "
- << keyPatternObj
- << ". Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ str::stream() << indexes.size()
+ << " matching indexes for key pattern: " << keyPatternObj
+ << ". Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj(),
indexes.size() == 1);
desc = indexes[0];
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 52fc60a53a1..eef2993d7e2 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -45,9 +45,9 @@
namespace mongo {
+using std::string;
using std::unique_ptr;
using std::vector;
-using std::string;
using stdx::make_unique;
using fts::FTSSpec;
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index d77676122a8..a7b307bf49c 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -116,8 +116,7 @@ void assertRequiredPathsPresent(const mb::Document& document, const FieldRefSet&
uassert(ErrorCodes::NoSuchKey,
str::stream() << "After applying the update, the new document was missing the "
"required field '"
- << (*path).dottedField()
- << "'",
+ << (*path).dottedField() << "'",
elem.ok());
uassert(
ErrorCodes::NotSingleValueField,
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 2f59e755c7a..1d3934443e6 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -54,5 +54,5 @@ bool ensureStillMatches(const Collection* collection,
WorkingSet* ws,
WorkingSetID id,
const CanonicalQuery* cq);
-}
-}
+} // namespace write_stage_common
+} // namespace mongo
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
index 7adf5c74dcd..ad98dcfdc35 100644
--- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
+++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
@@ -51,9 +51,7 @@ std::unique_ptr<DBClientBase> connect(StringData appName) {
void setWaitWithPinnedCursorDuringGetMoreBatchFailpoint(DBClientBase* conn, bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off")
- << "data"
+ << "mode" << (enable ? "alwaysOn" : "off") << "data"
<< BSON("shouldNotdropLock" << true));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
@@ -63,8 +61,7 @@ void setWaitBeforeUnpinningOrDeletingCursorAfterGetMoreBatchFailpoint(DBClientBa
bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off"));
+ << "mode" << (enable ? "alwaysOn" : "off"));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
}
@@ -158,12 +155,9 @@ TEST(CurrentOpExhaustCursorTest, CanSeeEachExhaustCursorPseudoGetMoreInCurrentOp
// Generate a currentOp filter based on the cursorId and the cumulative nDocsReturned.
const auto curOpMatch = BSON("command.collection"
<< "exhaust_cursor_currentop"
- << "command.getMore"
- << queryCursor->getCursorId()
- << "msg"
+ << "command.getMore" << queryCursor->getCursorId() << "msg"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "cursor.nDocsReturned"
- << i);
+ << "cursor.nDocsReturned" << i);
// Confirm that the exhaust getMore appears in the $currentOp output.
ASSERT(confirmCurrentOpContents(conn.get(), curOpMatch, parallelWaitTimeoutMS));
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 9627b914c6b..494aad69d63 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -79,9 +79,7 @@ protected:
valLong = 1LL;
doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
- << aString(valString)
- << anOID(valOID)
- << aLong(valLong));
+ << aString(valString) << anOID(valOID) << aLong(valLong));
}
void tearDown() {}
@@ -213,9 +211,10 @@ TEST(ComplexExtraction, GetStringVector) {
BSONField<vector<string>> vectorField("testVector");
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
BSONObj obj = bob.obj();
vector<string> parsedVector;
@@ -266,9 +265,10 @@ TEST(ComplexExtraction, RoundTripVector) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
obj = bob.obj();
}
@@ -295,12 +295,13 @@ TEST(ComplexExtraction, GetStringMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -317,14 +318,15 @@ TEST(ComplexExtraction, GetObjectMap) {
BSONField<map<string, BSONObj>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << BSON("a"
- << "a")
- << "b"
- << BSON("b"
- << "b")
- << "c"
- << BSON("c"
- << "c"));
+ bob << mapField()
+ << BSON("a" << BSON("a"
+ << "a")
+ << "b"
+ << BSON("b"
+ << "b")
+ << "c"
+ << BSON("c"
+ << "c"));
BSONObj obj = bob.obj();
map<string, BSONObj> parsedMap;
@@ -347,12 +349,11 @@ TEST(ComplexExtraction, GetBadMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << 123
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b" << 123 << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -369,12 +370,13 @@ TEST(ComplexExtraction, RoundTripMap) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
obj = bob.obj();
}
@@ -430,9 +432,7 @@ TEST(ComplexExtraction, GetBadNestedMap) {
BSONObj nestedMapObj = BSON("a"
<< "a"
- << "b"
- << 123
- << "c"
+ << "b" << 123 << "c"
<< "c");
BSONObjBuilder bob;
diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp
index cbfcee236d3..c55a722b64b 100644
--- a/src/mongo/db/field_ref_set.cpp
+++ b/src/mongo/db/field_ref_set.cpp
@@ -36,8 +36,8 @@
namespace mongo {
-using std::vector;
using std::string;
+using std::vector;
namespace {
@@ -52,7 +52,7 @@ StringData safeFirstPart(const FieldRef* fieldRef) {
return fieldRef->getPart(0);
}
}
-}
+} // namespace
bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const {
return *l < *r;
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 92e1edab444..9307ab7570c 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -157,33 +157,33 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
- * Initial state. Either start() or stop() can be called next.
- */
+ * Initial state. Either start() or stop() can be called next.
+ */
kNotStarted,
/**
- * start() has been called. stop() should be called next.
- */
+ * start() has been called. stop() should be called next.
+ */
kStarted,
/**
- * stop() has been called, and the background thread is in progress of shutting down
- */
+ * stop() has been called, and the background thread is in progress of shutting down
+ */
kStopRequested,
/**
- * Controller has been stopped.
- */
+ * Controller has been stopped.
+ */
kDone,
};
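The diagram above allows kNotStarted -> kStarted -> kStopRequested -> kDone, plus the top edge straight from kNotStarted to kDone (stop() called before start()). A toy transition check encoding exactly those edges; this is illustrative, not the controller's actual enforcement code:

#include <cassert>

enum class State { kNotStarted, kStarted, kStopRequested, kDone };

bool canTransition(State from, State to) {
    switch (from) {
        case State::kNotStarted:
            return to == State::kStarted || to == State::kDone;
        case State::kStarted:
            return to == State::kStopRequested;
        case State::kStopRequested:
            return to == State::kDone;
        case State::kDone:
            return false;  // terminal state
    }
    return false;
}

int main() {
    assert(canTransition(State::kNotStarted, State::kStarted));
    assert(canTransition(State::kNotStarted, State::kDone));
    assert(!canTransition(State::kDone, State::kStarted));
    return 0;
}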
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 6892cd034b4..99b2e4c36de 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/base/deinitializer_context.h"
#include "mongo/bson/bson_validate.h"
#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/client.h"
#include "mongo/db/free_mon/free_mon_op_observer.h"
@@ -119,8 +118,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
@@ -248,10 +247,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doRegister(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doRegister(req); });
});
@@ -295,10 +293,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doMetrics(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doMetrics(req); });
});
@@ -543,8 +540,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// max reporting interval
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -555,8 +551,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 30 * 60 * 60 * 24LL))));
+ << "reportingInterval" << 30 * 60 * 60 * 24LL))));
// Positive: version 2
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -567,8 +562,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Positive: empty registration id string
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -579,8 +573,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: bad protocol version
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -591,8 +584,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: halt uploading
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -603,8 +595,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -614,20 +605,16 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "id"
<< "mock123"
- << "informationalURL"
- << std::string(5000, 'b')
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -636,10 +623,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
// Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -650,8 +634,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -662,39 +645,36 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
// Positive: Ensure the response is validated correctly
TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Support version 2
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 2LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 2LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Add resendRegistration
ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -707,10 +687,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL
- << "resendRegistration"
- << true))));
+ << "reportingInterval" << 1LL << "resendRegistration" << true))));
// Positive: max reporting interval
@@ -724,89 +701,74 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 60 * 60 * 24 * 30LL))));
+ << "reportingInterval" << 60 * 60 * 24 * 30LL))));
// Negative: bad protocol version
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 42LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+ // Negative: halt uploading
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 1LL << "haltMetricsUploading" << true
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+ // Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 42LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
+ BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
+ << "id" << std::string(5000, 'a') << "informationalURL"
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: halt uploading
+ // Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << true << "permanentlyDelete" << false
- << "id"
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+
+ << "permanentlyDelete" << false << "id"
<< "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: large registration id
+ // Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
<< "id"
- << std::string(5000, 'a')
+ << "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
- // Negative: large URL
+ // Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false
-
- << "permanentlyDelete"
- << false
- << "id"
+ << "permanentlyDelete" << false << "id"
<< "mock123"
<< "informationalURL"
- << std::string(5000, 'b')
+ << "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
-
- // Negative: large message
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
-
- // Negative: too small a reporting interval
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -818,8 +780,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
/**
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 55b3091c34a..71a34dd84b4 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -67,8 +67,8 @@ enum class FreeMonMessageType {
AsyncRegisterFail,
/**
- * Unregister server from server command.
- */
+ * Unregister server from server command.
+ */
UnregisterCommand,
/**
@@ -117,24 +117,24 @@ enum class FreeMonMessageType {
*/
enum class RegistrationType {
/**
- * Do not register on start because it was not configured via commandline/config file.
- */
+ * Do not register on start because it was not configured via commandline/config file.
+ */
DoNotRegister,
/**
- * Register immediately on start since we are a standalone.
- */
+ * Register immediately on start since we are a standalone.
+ */
RegisterOnStart,
/**
- * Register after transition to becoming primary because we are in a replica set,
- * and Free Monitoring has been explicitly enabled.
- */
+ * Register after transition to becoming primary because we are in a replica set,
+ * and Free Monitoring has been explicitly enabled.
+ */
RegisterAfterOnTransitionToPrimary,
/**
- * As above, but only if we have been runtime enabled.
- */
+ * As above, but only if we have been runtime enabled.
+ */
RegisterAfterOnTransitionToPrimaryIfEnabled,
};
@@ -334,7 +334,7 @@ private:
/**
* For the messages that the caller needs to wait on, this provides a mechanism to wait on messages
* to be processed.
-*/
+ */
template <FreeMonMessageType typeT>
struct FreeMonWaitablePayloadForMessage {
using payload_type = void;
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index b6c150f5e64..a65dd1ecdcc 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -105,7 +105,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/register", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -128,7 +127,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/metrics", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -152,7 +150,7 @@ private:
std::string url(FreeMonEndpointURL + path.toString());
auto status = _executor->scheduleWork(
- [ promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this ](
+ [promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
ConstDataRange cdr(data->data(), data->size());
try {
@@ -202,28 +200,11 @@ public:
// Try to filter server status to make it cheaper to collect. Harmless if we gather
// extra
BSON("serverStatus" << 1 << "storageEngine" << true << "extra_info" << false
- << "opLatencies"
- << false
- << "opcountersRepl"
- << false
- << "opcounters"
- << false
- << "transactions"
- << false
- << "connections"
- << false
- << "network"
- << false
- << "tcMalloc"
- << false
- << "network"
- << false
- << "wiredTiger"
- << false
- << "sharding"
- << false
- << "metrics"
- << false)) {}
+ << "opLatencies" << false << "opcountersRepl" << false
+ << "opcounters" << false << "transactions" << false
+ << "connections" << false << "network" << false << "tcMalloc"
+ << false << "network" << false << "wiredTiger" << false
+ << "sharding" << false << "metrics" << false)) {}
std::string name() const final {
return "storageEngine";
diff --git a/src/mongo/db/free_mon/free_mon_op_observer.cpp b/src/mongo/db/free_mon/free_mon_op_observer.cpp
index 09bfb3ff62c..29e380c8baa 100644
--- a/src/mongo/db/free_mon/free_mon_op_observer.cpp
+++ b/src/mongo/db/free_mon/free_mon_op_observer.cpp
@@ -42,8 +42,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
const auto getFreeMonDeleteState = OperationContext::declareDecoration<bool>();
diff --git a/src/mongo/db/free_mon/free_mon_options.h b/src/mongo/db/free_mon/free_mon_options.h
index 60203dc2b94..19f707e8b65 100644
--- a/src/mongo/db/free_mon/free_mon_options.h
+++ b/src/mongo/db/free_mon/free_mon_options.h
@@ -35,8 +35,8 @@
namespace mongo {
/**
-* Free Monitoring Command line choices
-*/
+ * Free Monitoring Command line choices
+ */
enum class EnableCloudStateEnum : std::int32_t {
kOn,
kOff,
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index 7013d72e244..8cb57bda42f 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -465,36 +465,29 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream()
<< "Unexpected registration response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion << "), received '"
+ << resp.getVersion() << "'");
}
if (resp.getId().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "InformationURL is '" << resp.getInformationalURL().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().size() >= kInformationalMessageMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -502,19 +495,15 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
@@ -540,30 +529,24 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
if (!(resp.getVersion() >= kMinProtocolVersion && resp.getVersion() <= kMaxProtocolVersion)) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Unexpected metrics response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion
+ << "), received '" << resp.getVersion() << "'");
}
if (resp.getId().is_initialized() && resp.getId().get().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().is_initialized() &&
resp.getInformationalURL().get().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "InformationURL is '"
- << resp.getInformationalURL().get().size()
- << "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ str::stream()
+ << "InformationURL is '" << resp.getInformationalURL().get().size()
+ << "' bytes in length, maximum allowed length is '"
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().is_initialized() &&
@@ -571,8 +554,7 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -580,19 +562,15 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
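
For context on the hunks above: every change in this file is mechanical — the operands of one logical str::stream() message are re-packed onto fewer lines, with nothing added or removed. A minimal standalone analog of the pattern, using std::ostringstream in place of mongo::str::stream() and with the two interval bounds assumed purely for illustration:

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    namespace {
    // Assumed bounds, for illustration only.
    constexpr std::int64_t kReportingIntervalSecondsMin = 1;
    constexpr std::int64_t kReportingIntervalSecondsMax = 86400;

    // std::ostringstream stands in for mongo::str::stream(): all operands of
    // one logical message are chained with operator<< in a single expression.
    std::string validateInterval(std::int64_t interval) {
        if (interval < kReportingIntervalSecondsMin || interval > kReportingIntervalSecondsMax) {
            std::ostringstream ss;
            ss << "Reporting Interval '" << interval << "' must be in the range ["
               << kReportingIntervalSecondsMin << "," << kReportingIntervalSecondsMax << "]";
            return ss.str();
        }
        return "OK";
    }
    }  // namespace

    int main() {
        std::cout << validateInterval(0) << '\n';  // prints the out-of-range message
    }
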
diff --git a/src/mongo/db/free_mon/free_mon_queue_test.cpp b/src/mongo/db/free_mon/free_mon_queue_test.cpp
index ea38c7bad5c..ad6104c5126 100644
--- a/src/mongo/db/free_mon/free_mon_queue_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue_test.cpp
@@ -146,13 +146,11 @@ TEST_F(FreeMonQueueTest, TestQueueStop) {
auto swSchedule =
_mockThreadPool->scheduleWork([&](const executor::TaskExecutor::CallbackArgs& cbArgs) {
-
barrier.countDownAndWait();
// Try to dequeue from a stopped task queue
auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource());
ASSERT_FALSE(item.is_initialized());
-
});
ASSERT_OK(swSchedule.getStatus());
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index 143a6c4b391..509504037b7 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -70,18 +70,12 @@ TEST_F(FTDCCompressorTest, TestBasic) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42),
+ << "key1" << 33 << "key2" << 42),
Date_t());
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t());
ASSERT_HAS_SPACE(st);
@@ -190,112 +184,64 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_HAS_SPACE(st);
// Rename field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Change type
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47 << "key7" << 34 << "key9"
+ << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Remove Field
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_HAS_SPACE(st);
// Start new batch
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 5));
+ << "key7" << 5));
ASSERT_SCHEMA_CHANGED(st);
// Change field to object
@@ -309,22 +255,19 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
// Change field from object to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 7));
+ << "key7" << 7));
ASSERT_SCHEMA_CHANGED(st);
// Change field from number to array
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << BSON_ARRAY(13 << 17)));
+ << "key7" << BSON_ARRAY(13 << 17)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from array to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 19));
+ << "key7" << 19));
ASSERT_SCHEMA_CHANGED(st);
@@ -351,13 +294,11 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
auto st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 42));
+ << "int1" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 45));
+ << "int1" << 45));
ASSERT_HAS_SPACE(st);
// Add string field
@@ -365,8 +306,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int1"
- << 47));
+ << "int1" << 47));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -374,41 +314,34 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int2"
- << 48));
+ << "int2" << 48));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 49));
+ << "int2" << 49));
ASSERT_HAS_SPACE(st);
// Add string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 50
- << "str3"
+ << "int2" << 50 << "str3"
<< "bar"));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 51
- << "str3"
+ << "int1" << 51 << "str3"
<< "bar"));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 52));
+ << "int1" << 52));
ASSERT_HAS_SPACE(st);
@@ -419,8 +352,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int1"
- << 53));
+ << "int1" << 53));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -430,15 +362,13 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int2"
- << 54));
+ << "int2" << 54));
ASSERT_SCHEMA_CHANGED(st);
// Remove 2 string fields
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 55));
+ << "int2" << 55));
ASSERT_HAS_SPACE(st);
// Change string to number
@@ -448,8 +378,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
// Change number to string
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 67));
+ << "int1" << 67));
ASSERT_SCHEMA_CHANGED(st);
}
@@ -459,24 +388,15 @@ TEST_F(FTDCCompressorTest, TestNumbersCompat) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -500,50 +420,35 @@ TEST_F(FTDCCompressorTest, Types) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
const char bytes[] = {0x1, 0x2, 0x3};
- BSONObj o = BSON("created" << DATENOW // date_t
- << "null"
- << BSONNULL // { a : null }
- << "undefined"
- << BSONUndefined // { a : undefined }
+ BSONObj o = BSON("created" << DATENOW // date_t
+ << "null" << BSONNULL // { a : null }
+ << "undefined" << BSONUndefined // { a : undefined }
<< "obj"
<< BSON( // nested object
"a"
<< "abc"
- << "b"
- << 123LL)
+ << "b" << 123LL)
<< "foo"
<< BSON_ARRAY("bar"
<< "baz"
- << "qux") // array of strings
- << "foo2"
- << BSON_ARRAY(5 << 6 << 7) // array of ints
- << "bindata"
- << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
- << "oid"
- << OID("010203040506070809101112") // oid
- << "bool"
- << true // bool
- << "regex"
- << BSONRegEx("mongodb") // regex
- << "ref"
- << BSONDBRef("c", OID("010203040506070809101112")) // ref
- << "code"
- << BSONCode("func f() { return 1; }") // code
+ << "qux") // array of strings
+ << "foo2" << BSON_ARRAY(5 << 6 << 7) // array of ints
+ << "bindata" << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
+ << "oid" << OID("010203040506070809101112") // oid
+ << "bool" << true // bool
+ << "regex" << BSONRegEx("mongodb") // regex
+ << "ref" << BSONDBRef("c", OID("010203040506070809101112")) // ref
+ << "code" << BSONCode("func f() { return 1; }") // code
<< "codewscope"
<< BSONCodeWScope("func f() { return 1; }",
BSON("c" << true)) // codew
- << "minkey"
- << MINKEY // minkey
- << "maxkey"
- << MAXKEY // maxkey
- );
+ << "minkey" << MINKEY // minkey
+ << "maxkey" << MAXKEY // maxkey
+ );
st = c.addSample(o);
ASSERT_SCHEMA_CHANGED(st);
@@ -553,17 +458,11 @@ TEST_F(FTDCCompressorTest, Types) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -575,37 +474,25 @@ TEST_F(FTDCCompressorTest, TestFull) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
for (size_t i = 0; i != FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_FULL(st);
// Add Value
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
}
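
The assertions exercised above (ASSERT_HAS_SPACE, ASSERT_SCHEMA_CHANGED, ASSERT_FULL) express a three-outcome contract: a sample either fits the current chunk, changes the metric schema and starts a new chunk, or fills the chunk. A toy sketch of that contract — not FTDC's real compressor, just the state machine the tests imply, with the schema reduced to an ordered list of field names:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    namespace {
    enum class AddResult { kHasSpace, kSchemaChanged, kFull };

    // Toy stand-in for FTDCCompressor: the "schema" is reduced to the ordered
    // list of field names, and values are ignored entirely.
    class ToyCompressor {
    public:
        explicit ToyCompressor(std::size_t maxSamples) : _maxSamples(maxSamples) {}

        AddResult addSample(const std::vector<std::string>& fieldNames) {
            if (_schema.empty()) {
                _schema = fieldNames;  // first sample adopts the schema silently
                _count = 1;
                return AddResult::kHasSpace;
            }
            if (fieldNames != _schema) {
                _schema = fieldNames;  // add/rename/remove/type change: new chunk
                _count = 1;
                return AddResult::kSchemaChanged;
            }
            return ++_count >= _maxSamples ? AddResult::kFull : AddResult::kHasSpace;
        }

    private:
        std::vector<std::string> _schema;
        std::size_t _count = 0;
        std::size_t _maxSamples;
    };
    }  // namespace

    int main() {
        ToyCompressor c(3);
        auto p = [](AddResult r) { std::cout << static_cast<int>(r) << '\n'; };
        p(c.addSample({"name", "key1", "key2"}));  // 0: has space
        p(c.addSample({"name", "key1", "key2"}));  // 0: has space
        p(c.addSample({"name", "key1", "key3"}));  // 1: schema changed (rename)
    }
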
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 26d76b28ad7..5d1f2f5487a 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -150,14 +150,14 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
* Initial state. Either start() or stop() can be called next.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 8afc65b96a3..4f67923730c 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -119,8 +119,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index e79d4c9febc..8962e9ae229 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -76,8 +76,8 @@ StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
boost::filesystem::create_directories(dir, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << dir.generic_string() << "\" could not be created: "
- << ec.message()};
+ str::stream() << "\"" << dir.generic_string()
+ << "\" could not be created: " << ec.message()};
}
}
@@ -233,9 +233,9 @@ Status FTDCFileManager::trimDirectory(std::vector<boost::filesystem::path>& file
boost::filesystem::remove(*it, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << (*it).generic_string()
- << "\" could not be removed during trimming: "
- << ec.message()};
+ str::stream()
+ << "\"" << (*it).generic_string()
+ << "\" could not be removed during trimming: " << ec.message()};
}
}
}
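
Both hunks in this file reflow the same error-construction idiom: call the non-throwing boost::filesystem overload that reports through an error_code, then fold ec.message() into the Status text. A self-contained sketch of the idiom using std::filesystem as an assumed stand-in for the boost API used here:

    #include <filesystem>
    #include <iostream>
    #include <system_error>

    int main() {
        namespace fs = std::filesystem;
        fs::create_directories("demo_dir/sub");  // make demo_dir non-empty

        std::error_code ec;
        fs::remove("demo_dir", ec);  // non-recursive remove of a non-empty dir fails
        if (ec)
            std::cerr << "\"demo_dir\" could not be removed during trimming: "
                      << ec.message() << '\n';

        fs::remove_all("demo_dir");  // cleanup
    }
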
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 1bb8ea78702..d9bd8aada5c 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -72,45 +72,34 @@ TEST_F(FTDCFileManagerTest, TestFull) {
// Test a large numbers of zeros, and incremental numbers in a full buffer
for (int j = 0; j < 10; j++) {
- ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << 3230792343LL
- << "key2"
- << 235135),
- Date_t()));
+ ASSERT_OK(
+ mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1" << 3230792343LL << "key2" << 235135),
+ Date_t()));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
- ASSERT_OK(
- mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
- << static_cast<long long int>(i *
- (645 << j))),
- Date_t()));
+ ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(
+ client,
+ BSON("name"
+ << "joe"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << static_cast<long long int>(i * (645 << j))),
+ Date_t()));
}
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -175,9 +164,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -187,9 +174,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -197,20 +182,14 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -245,9 +224,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -257,9 +234,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -267,20 +242,14 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -311,23 +280,14 @@ TEST_F(FTDCFileManagerTest, TestNormalCrashInterim) {
BSONObj mdoc1 = BSON("name"
<< "some_metadata"
- << "key1"
- << 34
- << "something"
- << 98);
+ << "key1" << 34 << "something" << 98);
BSONObj sdoc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj sdoc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
boost::filesystem::path fileOut;
diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp
index 23d468aac0f..b71257e4278 100644
--- a/src/mongo/db/ftdc/file_reader.cpp
+++ b/src/mongo/db/ftdc/file_reader.cpp
@@ -195,8 +195,7 @@ StatusWith<BSONObj> FTDCFileReader::readDocument() {
if (readSize != _stream.gcount()) {
return {ErrorCodes::FileStreamFailed,
str::stream() << "Failed to read " << readSize << " bytes from file \""
- << _file.generic_string()
- << "\""};
+ << _file.generic_string() << "\""};
}
ConstDataRange cdr(_buffer.data(), _buffer.data() + bsonLength);
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index be4ea127b3b..24a6bf4cb17 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -210,8 +210,7 @@ Status FTDCFileWriter::flush(const boost::optional<ConstDataRange>& range, Date_
if (ec) {
return {ErrorCodes::NonExistentPath,
str::stream() << "\"" << _interimFile.generic_string()
- << "\" could not be removed during flush: "
- << ec.message()};
+ << "\" could not be removed during flush: " << ec.message()};
}
return Status::OK();
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 5da93d7026d..545741aa639 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -60,16 +60,10 @@ TEST_F(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -111,16 +105,10 @@ TEST_F(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -216,69 +204,41 @@ TEST_F(FTDCFileTest, TestSchemaChanges) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
// Rename field
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
// Change type
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
// RemoveField
c.addSample(BSON("name"
<< "joe"
<< "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
}
// Test a full buffer
@@ -289,34 +249,22 @@ TEST_F(FTDCFileTest, TestFull) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
}
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
}
}
diff --git a/src/mongo/db/ftdc/ftdc_system_stats.h b/src/mongo/db/ftdc/ftdc_system_stats.h
index b5886fea819..bdc2e87984c 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats.h
+++ b/src/mongo/db/ftdc/ftdc_system_stats.h
@@ -33,7 +33,6 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/ftdc/controller.h"
-#include "mongo/db/ftdc/controller.h"
namespace mongo {
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
index e68dcff300c..638380b0bc7 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
@@ -68,7 +68,10 @@ static const std::vector<StringData> kMemKeys{
};
static const std::vector<StringData> kNetstatKeys{
- "Tcp:"_sd, "Ip:"_sd, "TcpExt:"_sd, "IpExt:"_sd,
+ "Tcp:"_sd,
+ "Ip:"_sd,
+ "TcpExt:"_sd,
+ "IpExt:"_sd,
};
/**
diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp
index f745068fdea..9500bf62ecb 100644
--- a/src/mongo/db/ftdc/util.cpp
+++ b/src/mongo/db/ftdc/util.cpp
@@ -444,9 +444,7 @@ StatusWith<FTDCType> getBSONDocumentType(const BSONObj& obj) {
static_cast<FTDCType>(value) != FTDCType::kMetadata) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << std::string(kFTDCTypeField)
- << "' is not an expected value, found '"
- << value
- << "'"};
+ << "' is not an expected value, found '" << value << "'"};
}
return {static_cast<FTDCType>(value)};
diff --git a/src/mongo/db/ftdc/util.h b/src/mongo/db/ftdc/util.h
index 87defea80ea..4d47c610559 100644
--- a/src/mongo/db/ftdc/util.h
+++ b/src/mongo/db/ftdc/util.h
@@ -45,23 +45,23 @@ namespace mongo {
namespace FTDCBSONUtil {
/**
-* Type of FTDC document.
-*
-* NOTE: Persisted to disk via BSON Objects.
-*/
+ * Type of FTDC document.
+ *
+ * NOTE: Persisted to disk via BSON Objects.
+ */
enum class FTDCType : std::int32_t {
/**
- * A metadata document is composed of a header + an array of bson documents
- *
- * See createBSONMetadataChunkDocument
- */
+ * A metadata document is composed of a header + an array of bson documents
+ *
+ * See createBSONMetadataChunkDocument
+ */
kMetadata = 0,
/**
- * A metrics chunk is composed of a header + a compressed metric chunk.
- *
- * See createBSONMetricChunkDocument
- */
+ * A metrics chunk is composed of a header + a compressed metric chunk.
+ *
+ * See createBSONMetricChunkDocument
+ */
kMetricChunk = 1,
};
diff --git a/src/mongo/db/ftdc/varint.h b/src/mongo/db/ftdc/varint.h
index 08a064de2b4..66a4b30cab7 100644
--- a/src/mongo/db/ftdc/varint.h
+++ b/src/mongo/db/ftdc/varint.h
@@ -46,8 +46,8 @@ namespace mongo {
*/
struct FTDCVarInt {
/**
- * Maximum number of bytes an integer can compress to
- */
+ * Maximum number of bytes an integer can compress to
+ */
static const std::size_t kMaxSizeBytes64 = 10;
FTDCVarInt() = default;
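
For context on the constant above: a base-128 varint stores 7 payload bits per byte plus a continuation bit, so a 64-bit value needs at most ceil(64/7) = 10 bytes — which is where kMaxSizeBytes64 = 10 comes from, assuming FTDC uses the conventional encoding (the header alone does not spell it out). A sketch of that encoding:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    namespace {
    // Conventional base-128 varint: low 7 bits carry payload, the high bit
    // says "another byte follows". ceil(64 / 7) == 10, hence the 10-byte
    // worst case.
    std::size_t encodeVarint(std::uint64_t value, std::uint8_t out[10]) {
        std::size_t n = 0;
        do {
            std::uint8_t byte = value & 0x7f;
            value >>= 7;
            if (value)
                byte |= 0x80;  // continuation bit
            out[n++] = byte;
        } while (value);
        return n;
    }
    }  // namespace

    int main() {
        std::uint8_t buf[10];
        std::cout << encodeVarint(~std::uint64_t{0}, buf) << " bytes\n";  // "10 bytes"
    }
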
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index ebca711dd2b..c9666f0834a 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -64,7 +64,7 @@ inline bool _matchPrefix(const string& dottedName, const string& weight) {
}
return str::startsWith(weight, dottedName + '.');
}
-}
+} // namespace
bool FTSElementIterator::more() {
//_currentValue = advance();
@@ -113,9 +113,10 @@ FTSIteratorValue FTSElementIterator::advance() {
// 1. parent path empty (top level): use the current field name
// 2. parent path non-empty and obj is an array: use the parent path
// 3. parent path non-empty and obj is a sub-doc: append field name to parent path
- string dottedName = (_frame._parentPath.empty() ? fieldName : _frame._isArray
- ? _frame._parentPath
- : _frame._parentPath + '.' + fieldName);
+ string dottedName =
+ (_frame._parentPath.empty()
+ ? fieldName
+ : _frame._isArray ? _frame._parentPath : _frame._parentPath + '.' + fieldName);
// Find lower bound of dottedName in _weights. lower_bound leaves us at the first
// weight that could possibly match or be a prefix of dottedName. And if this
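
The reflowed nested ternary above encodes exactly the three cases enumerated in the comment. Pulled out as a plain function (names here are illustrative, not mongo's actual API), the logic reads:

    #include <iostream>
    #include <string>

    namespace {
    // The same three cases as the comment above, pulled out of the ternary.
    std::string dottedName(const std::string& parentPath,
                           bool parentIsArray,
                           const std::string& fieldName) {
        if (parentPath.empty())
            return fieldName;                 // 1. top level: current field name
        if (parentIsArray)
            return parentPath;                // 2. array element: keep parent path
        return parentPath + '.' + fieldName;  // 3. sub-document: append field name
    }
    }  // namespace

    int main() {
        std::cout << dottedName("", false, "title") << '\n';     // "title"
        std::cout << dottedName("tags", true, "0") << '\n';      // "tags"
        std::cout << dottedName("meta", false, "lang") << '\n';  // "meta.lang"
    }
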
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index 2bcf35ff398..98652b875d4 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -117,8 +117,8 @@ BSONElement extractNonFTSKeyElement(const BSONObj& obj, StringData path) {
dps::extractAllElementsAlongPath(
obj, path, indexedElements, expandArrayOnTrailingField, &arrayComponents);
uassert(ErrorCodes::CannotBuildIndexKeys,
- str::stream() << "Field '" << path << "' of text index contains an array in document: "
- << obj,
+ str::stream() << "Field '" << path
+ << "' of text index contains an array in document: " << obj,
arrayComponents.empty());
// Since there aren't any arrays, there cannot be more than one extracted element on 'path'.
@@ -166,9 +166,7 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16732,
str::stream() << "too many unique keys for a single document to"
- << " have a text index, max is "
- << term_freqs.size()
- << obj["_id"],
+ << " have a text index, max is " << term_freqs.size() << obj["_id"],
term_freqs.size() <= 400000);
}
@@ -205,9 +203,7 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16733,
str::stream() << "trying to index text where term list is too big, max is "
- << MaxKeyBSONSizeMB
- << "mb "
- << obj["_id"],
+ << MaxKeyBSONSizeMB << "mb " << obj["_id"],
keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
}
}
@@ -267,5 +263,5 @@ void FTSIndexFormat::_appendIndexKey(BSONObjBuilder& b,
b.append("", weight);
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h
index cff73d5caad..dd83e8603a8 100644
--- a/src/mongo/db/fts/fts_index_format.h
+++ b/src/mongo/db/fts/fts_index_format.h
@@ -70,5 +70,5 @@ private:
const std::string& term,
TextIndexVersion textIndexVersion);
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index b847d16dd9d..c9d6779e639 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -68,14 +68,12 @@ TEST(FTSIndexFormat, Simple1) {
TEST(FTSIndexFormat, ExtraBack1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1)))));
+ << "x" << 1)))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -94,8 +92,7 @@ TEST(FTSIndexFormat, ExtraFront1) {
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -158,8 +155,7 @@ void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet
TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 1))));
+ << "textIndexVersion" << 1))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -188,8 +184,7 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 2))));
+ << "textIndexVersion" << 2))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -222,8 +217,7 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
TEST(FTSIndexFormat, LongWordTextIndexVersion3) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 3))));
+ << "textIndexVersion" << 3))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index faa54e79333..33f5ce4d565 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -82,7 +82,7 @@ LanguageMap languageMapV2;
// Case-sensitive by lookup key.
typedef std::map<StringData, const FTSLanguage*> LanguageMapLegacy;
LanguageMapLegacy languageMapV1;
-}
+} // namespace
MONGO_INITIALIZER_GROUP(FTSAllLanguagesRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS);
@@ -277,10 +277,10 @@ StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion te
if (it == languageMap->end()) {
// TEXT_INDEX_VERSION_2 and above reject unrecognized language strings.
- Status status = Status(ErrorCodes::BadValue,
- str::stream() << "unsupported language: \"" << langName
- << "\" for text index version "
- << textIndexVersion);
+ Status status =
+ Status(ErrorCodes::BadValue,
+ str::stream() << "unsupported language: \"" << langName
+ << "\" for text index version " << textIndexVersion);
return StatusWithFTSLanguage(status);
}
@@ -312,5 +312,5 @@ std::unique_ptr<FTSTokenizer> UnicodeFTSLanguage::createTokenizer() const {
const FTSPhraseMatcher& UnicodeFTSLanguage::getPhraseMatcher() const {
return _unicodePhraseMatcher;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index 47a6ab2213d..8bdcd1aa5ce 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -168,5 +168,5 @@ private:
extern BasicFTSLanguage languagePorterV1;
extern BasicFTSLanguage languageEnglishV2;
extern BasicFTSLanguage languageFrenchV2;
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index e229bbdf0bc..29166d88319 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -175,5 +175,5 @@ TEST(FTSLanguageV1, Empty) {
ASSERT(swl.getStatus().isOK());
ASSERT_EQUALS(swl.getValue()->str(), "none");
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index e14a14d4464..be9daa5801d 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -176,5 +176,5 @@ FTSTokenizer::Options FTSMatcher::_getTokenizerOptions() const {
return tokenizerOptions;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.h b/src/mongo/db/fts/fts_matcher.h
index 5dbcc981109..660194a9585 100644
--- a/src/mongo/db/fts/fts_matcher.h
+++ b/src/mongo/db/fts/fts_matcher.h
@@ -112,5 +112,5 @@ private:
const FTSQueryImpl _query;
const FTSSpec _spec;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher_test.cpp b/src/mongo/db/fts/fts_matcher_test.cpp
index 31f05cf2268..46c292ce55a 100644
--- a/src/mongo/db/fts/fts_matcher_test.cpp
+++ b/src/mongo/db/fts/fts_matcher_test.cpp
@@ -278,5 +278,5 @@ TEST(FTSMatcher, NegativePhrasesMatchWithCase) {
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"n R\""));
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John\" -\"Running\""));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp
index fffc4362fbb..8c3f2e6882c 100644
--- a/src/mongo/db/fts/fts_query_impl.cpp
+++ b/src/mongo/db/fts/fts_query_impl.cpp
@@ -203,5 +203,5 @@ BSONObj FTSQueryImpl::toBSON() const {
bob.append("negatedPhrases", getNegatedPhr());
return bob.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.h b/src/mongo/db/fts/fts_query_impl.h
index d399ee73763..97cdb8388df 100644
--- a/src/mongo/db/fts/fts_query_impl.h
+++ b/src/mongo/db/fts/fts_query_impl.h
@@ -84,5 +84,5 @@ private:
std::vector<std::string> _negatedPhrases;
std::set<std::string> _termsForBounds;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index d458004b0a5..b3b4cad71f1 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -478,5 +478,5 @@ TEST(FTSQueryImpl, CloneParsedQuery) {
ASSERT(castedClone->getNegatedPhr() == q.getNegatedPhr());
ASSERT(castedClone->getTermsForBounds() == q.getTermsForBounds());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.cpp b/src/mongo/db/fts/fts_query_parser.cpp
index a346e03451b..c6038be4575 100644
--- a/src/mongo/db/fts/fts_query_parser.cpp
+++ b/src/mongo/db/fts/fts_query_parser.cpp
@@ -102,5 +102,5 @@ QueryToken::Type FTSQueryParser::getType(char c) const {
return QueryToken::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.h b/src/mongo/db/fts/fts_query_parser.h
index f4bab3e7e1c..4f11799337c 100644
--- a/src/mongo/db/fts/fts_query_parser.h
+++ b/src/mongo/db/fts/fts_query_parser.h
@@ -84,5 +84,5 @@ private:
bool _previousWhiteSpace;
const StringData _raw;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 20560ccdad5..c358ba4b679 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -59,9 +59,9 @@ const std::string moduleDefaultLanguage("english");
bool validateOverride(const string& override) {
// The override field can't be empty, can't be prefixed with a dollar sign, and
// can't contain a dot.
- return !override.empty()&& override[0] != '$' && override.find('.') == std::string::npos;
-}
+ return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
}
+} // namespace
FTSSpec::FTSSpec(const BSONObj& indexInfo) {
// indexInfo is a text index spec. Text index specs pass through fixSpec() before being
@@ -90,12 +90,8 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) {
msgasserted(17364,
str::stream() << "attempt to use unsupported textIndexVersion "
<< textIndexVersionElt.numberInt()
- << "; versions supported: "
- << TEXT_INDEX_VERSION_3
- << ", "
- << TEXT_INDEX_VERSION_2
- << ", "
- << TEXT_INDEX_VERSION_1);
+ << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
+ << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);
}
// Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
@@ -272,7 +268,7 @@ Status verifyFieldNameNotReserved(StringData s) {
return Status::OK();
}
-}
+} // namespace
StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1) {
@@ -406,9 +402,7 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << i->second};
+ << MAX_WORD_WEIGHT << ") but found: " << i->second};
}
// Verify weight refers to a valid field.
@@ -513,5 +507,5 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index 53169f5e213..1d58c1da750 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -48,7 +48,7 @@ void _addFTSStuff(BSONObjBuilder* b) {
b->append("_fts", INDEX_NAME);
b->append("_ftsx", 1);
}
-}
+} // namespace
const FTSLanguage& FTSSpec::_getLanguageToUseV1(const BSONObj& userDoc) const {
BSONElement e = userDoc[_languageOverrideField];
@@ -240,9 +240,7 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
if (kv.second <= 0 || kv.second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << kv.second};
+ << MAX_WORD_WEIGHT << ") but found: " << kv.second};
}
b.append(kv.first, kv.second);
}
@@ -303,5 +301,5 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index f715b6f05ec..047968f2541 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -184,8 +184,7 @@ TEST(FTSSpec, ScoreSingleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -204,8 +203,7 @@ TEST(FTSSpec, ScoreMultipleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -247,8 +245,7 @@ TEST(FTSSpec, ScoreRepeatWord) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -273,8 +270,7 @@ TEST(FTSSpec, Extra1) {
TEST(FTSSpec, Extra2) {
BSONObj user = BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1));
+ << "x" << 1));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
FTSSpec spec(fixed);
ASSERT_EQUALS(0U, spec.numExtraBefore());
@@ -292,8 +288,7 @@ TEST(FTSSpec, Extra3) {
ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
fixed["key"].Obj());
ASSERT_BSONOBJ_EQ(BSON("data" << 1), fixed["weights"].Obj());
@@ -520,8 +515,7 @@ TEST(FTSSpec, NestedLanguages_Wildcard) {
TEST(FTSSpec, NestedLanguages_WildcardOverride) {
BSONObj indexSpec = BSON("key" << BSON("$**"
<< "text")
- << "weights"
- << BSON("d.e.f" << 20));
+ << "weights" << BSON("d.e.f" << 20));
FTSSpec spec(assertGet(FTSSpec::fixSpec(indexSpec)));
TermFrequencyMap tfm;
@@ -598,5 +592,5 @@ TEST(FTSSpec, TextIndexLegacyLanguageRecognition) {
ASSERT_EQUALS(tfm.size(), 0U); // "the" recognized as stopword
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.cpp b/src/mongo/db/fts/fts_util.cpp
index 5ef93b16559..f9de9ae33d7 100644
--- a/src/mongo/db/fts/fts_util.cpp
+++ b/src/mongo/db/fts/fts_util.cpp
@@ -35,5 +35,5 @@ namespace fts {
const std::string INDEX_NAME = "text";
const std::string WILDCARD = "$**";
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.h b/src/mongo/db/fts/fts_util.h
index 71eebcbf5f3..90eaa9095f6 100644
--- a/src/mongo/db/fts/fts_util.h
+++ b/src/mongo/db/fts/fts_util.h
@@ -46,5 +46,5 @@ enum TextIndexVersion {
TEXT_INDEX_VERSION_2 = 2, // Index format with ASCII support and murmur hashing.
TEXT_INDEX_VERSION_3 = 3, // Current index format with basic Unicode support.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp
index db5e97227da..8d54d1af104 100644
--- a/src/mongo/db/fts/stemmer.cpp
+++ b/src/mongo/db/fts/stemmer.cpp
@@ -63,5 +63,5 @@ StringData Stemmer::stem(StringData word) const {
return StringData((const char*)(sb_sym), sb_stemmer_length(_stemmer));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.h b/src/mongo/db/fts/stemmer.h
index a5a15174a94..e3608071010 100644
--- a/src/mongo/db/fts/stemmer.h
+++ b/src/mongo/db/fts/stemmer.h
@@ -63,5 +63,5 @@ public:
private:
struct sb_stemmer* _stemmer;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer_test.cpp b/src/mongo/db/fts/stemmer_test.cpp
index 42c67d7f97b..be09fe34b8c 100644
--- a/src/mongo/db/fts/stemmer_test.cpp
+++ b/src/mongo/db/fts/stemmer_test.cpp
@@ -47,5 +47,5 @@ TEST(English, Caps) {
ASSERT_EQUALS("unit", s.stem("united"));
ASSERT_EQUALS("Unite", s.stem("United"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.cpp b/src/mongo/db/fts/stop_words.cpp
index 48db6836736..39be67707bc 100644
--- a/src/mongo/db/fts/stop_words.cpp
+++ b/src/mongo/db/fts/stop_words.cpp
@@ -44,7 +44,7 @@ void loadStopWordMap(StringMap<std::set<std::string>>* m);
namespace {
StringMap<std::shared_ptr<StopWords>> StopWordsMap;
StopWords empty;
-}
+} // namespace
StopWords::StopWords() {}
@@ -70,5 +70,5 @@ MONGO_INITIALIZER(StopWords)(InitializerContext* context) {
}
return Status::OK();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.h b/src/mongo/db/fts/stop_words.h
index 22835300226..6c1c1cc07e1 100644
--- a/src/mongo/db/fts/stop_words.h
+++ b/src/mongo/db/fts/stop_words.h
@@ -61,5 +61,5 @@ public:
private:
StringMap<bool> _words; // Used as a set. The values have no meaning.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words_test.cpp b/src/mongo/db/fts/stop_words_test.cpp
index 96b1e941d3b..f0fb8ec37b8 100644
--- a/src/mongo/db/fts/stop_words_test.cpp
+++ b/src/mongo/db/fts/stop_words_test.cpp
@@ -41,5 +41,5 @@ TEST(English, Basic1) {
ASSERT(englishStopWords->isStopWord("the"));
ASSERT(!englishStopWords->isStopWord("computer"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.cpp b/src/mongo/db/fts/tokenizer.cpp
index 3de9eb00689..1463dc212bf 100644
--- a/src/mongo/db/fts/tokenizer.cpp
+++ b/src/mongo/db/fts/tokenizer.cpp
@@ -132,5 +132,5 @@ Token::Type Tokenizer::_type(char c) const {
return Token::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.h b/src/mongo/db/fts/tokenizer.h
index 1a0e79d9425..426449724e8 100644
--- a/src/mongo/db/fts/tokenizer.h
+++ b/src/mongo/db/fts/tokenizer.h
@@ -70,5 +70,5 @@ private:
const StringData _raw;
bool _english;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer_test.cpp b/src/mongo/db/fts/tokenizer_test.cpp
index 9f09736587a..db61f3abc7d 100644
--- a/src/mongo/db/fts/tokenizer_test.cpp
+++ b/src/mongo/db/fts/tokenizer_test.cpp
@@ -117,5 +117,5 @@ TEST(Tokenizer, Quote1French) {
ASSERT_EQUALS("s", b.data.toString());
ASSERT_EQUALS("car", c.data.toString());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/unicode/string.cpp b/src/mongo/db/fts/unicode/string.cpp
index 201c3539d61..8b97a671d92 100644
--- a/src/mongo/db/fts/unicode/string.cpp
+++ b/src/mongo/db/fts/unicode/string.cpp
@@ -61,7 +61,7 @@ inline void appendUtf8Codepoint(char32_t codepoint, OutputIterator* outputIt) {
*(*outputIt)++ = (((codepoint >> (6 * 0)) & 0x3f) | 0x80);
}
}
-}
+} // namespace
using linenoise_utf8::copyString32to8;
using linenoise_utf8::copyString8to32;
diff --git a/src/mongo/db/fts/unicode/string_test.cpp b/src/mongo/db/fts/unicode/string_test.cpp
index 2d3a386d1ec..a2943877b28 100644
--- a/src/mongo/db/fts/unicode/string_test.cpp
+++ b/src/mongo/db/fts/unicode/string_test.cpp
@@ -66,7 +66,7 @@ auto kCaseSensitive = String::kCaseSensitive;
auto kTurkish = CaseFoldMode::kTurkish;
auto kNormal = CaseFoldMode::kNormal;
-}
+} // namespace
// Macro to preserve line numbers and arguments in error messages.
diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp
index f0f77ab51ed..f21c96d3faf 100644
--- a/src/mongo/db/geo/big_polygon.cpp
+++ b/src/mongo/db/geo/big_polygon.cpp
@@ -228,4 +228,4 @@ bool BigSimplePolygon::Decode(Decoder* const decoder) {
bool BigSimplePolygon::DecodeWithinScope(Decoder* const decoder) {
MONGO_UNREACHABLE;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon.h b/src/mongo/db/geo/big_polygon.h
index bc0e4ce75f1..6df8d3e4fd9 100644
--- a/src/mongo/db/geo/big_polygon.h
+++ b/src/mongo/db/geo/big_polygon.h
@@ -115,4 +115,4 @@ private:
mutable std::unique_ptr<S2Polyline> _borderLine;
mutable std::unique_ptr<S2Polygon> _borderPoly;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index b29b7c3eb4a..2a42706906d 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -36,8 +36,8 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
// Helper to build a vector of S2Point
@@ -81,8 +81,7 @@ typedef PointBuilder points;
TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 square centered at [0,0]
S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
@@ -95,8 +94,7 @@ TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,20]
BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0)
- << LatLng(-10.0, 30.0)));
+ << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
@@ -108,18 +106,15 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// A 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(holePoly));
@@ -127,8 +122,7 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(holePoly));
ASSERT_TRUE(bigPoly24.Intersects(holePoly));
@@ -139,12 +133,10 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -152,24 +144,21 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
// A 16X16 square centered at [0,0] containing the shell
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(shellPoly));
ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
// Try a big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(shellPoly));
ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
// Try a big polygon smaller than the shell.
BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0)
- << LatLng(-4.0, 4.0)));
+ << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly8.Contains(shellPoly));
ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
@@ -178,8 +167,7 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
TEST(BigSimplePolygon, BasicComplement) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 square centered at [0,0]
@@ -192,8 +180,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 10x10 square centered at [0,20], contained by bigPoly20Comp
S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0)
- << LatLng(15.0, 25.0)));
+ << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
ASSERT(bigPoly20Comp.Contains(poly10Contained));
@@ -202,8 +189,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
// which is not allowed by S2.
S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
@@ -212,8 +198,7 @@ TEST(BigSimplePolygon, BasicComplement) {
TEST(BigSimplePolygon, BasicIntersects) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20.Invert();
// A 10x10 square centered at [10,10] (partial overlap)
@@ -228,19 +213,16 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// 1. BigPolygon doesn't touch holePoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
@@ -249,8 +231,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 2. BigPolygon intersects holePoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
@@ -259,8 +240,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 3. BigPolygon contains holePoly
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
@@ -268,9 +248,9 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 4. BigPolygon contains the right half of holePoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(holePoly));
@@ -282,12 +262,10 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -296,8 +274,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 1. BigPolygon doesn't touch shellPoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
@@ -306,8 +283,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 2. BigPolygon intersects shellPoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
@@ -316,8 +292,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 3. BigPolygon contains shellPoly's outer ring
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
@@ -325,9 +300,9 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 4. BigPolygon contains the right half of shellPoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(shellPoly));
@@ -335,8 +310,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 5. BigPolygon contain shellPoly (CW)
BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0)
- << LatLng(-6.0, 6.0)));
+ << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
@@ -345,13 +319,11 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
TEST(BigSimplePolygon, BasicWinding) {
// A 20x20 square centered at [0,0] (CCW)
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Everything *not* in a 20x20 square centered at [0,0] (CW)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
@@ -360,13 +332,11 @@ TEST(BigSimplePolygon, BasicWinding) {
TEST(BigSimplePolygon, LineRelations) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT(bigPoly20.Contains(line10));
@@ -386,14 +356,12 @@ TEST(BigSimplePolygon, LineRelations) {
TEST(BigSimplePolygon, LineRelationsComplement) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -406,8 +374,7 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
// A 30x30 line circling [0,0]
S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_TRUE(bigPoly20Comp.Contains(line30));
ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
}
@@ -415,13 +382,11 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
TEST(BigSimplePolygon, LineRelationsWinding) {
// Everything *not* in a 20x20 square centered at [0,0] (CW winding)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -431,13 +396,11 @@ TEST(BigSimplePolygon, LineRelationsWinding) {
TEST(BigSimplePolygon, PolarContains) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0]
S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0)
- << LatLng(85.0, -90.0)));
+ << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
@@ -448,8 +411,7 @@ TEST(BigSimplePolygon, PolarContains) {
TEST(BigSimplePolygon, PolarContainsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
// north pole
@@ -468,8 +430,7 @@ TEST(BigSimplePolygon, PolarContainsWithHoles) {
TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
vector<S2Loop*> loops;
@@ -512,8 +473,7 @@ void checkConsistency(const BigSimplePolygon& bigPoly,
TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Vertex point and collinear point
@@ -522,12 +482,10 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Polygon shares one edge
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0)
- << LatLng(80.0, -10.0)));
+ << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0)
- << LatLng(50.0, -10.0)));
+ << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
// Line
S2Polyline line(
@@ -538,12 +496,9 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, point);
@@ -571,18 +526,15 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Polygon
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0)
- << LatLng(80.0, 10.0)));
+ << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0)
- << LatLng(50.0, 10.0)));
+ << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
// Line
S2Polyline line(
pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
@@ -592,12 +544,9 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, poly);
@@ -616,4 +565,4 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
checkConsistency(bigPoly, expandedBigPoly, line);
checkConsistency(bigPoly, expandedBigPoly, collinearLine);
}
-}
+} // namespace
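The winding convention exercised by the tests above can be restated in isolation: a CCW loop encloses the small region, while inverting it (or listing the vertices CW) selects the complement, which the tests detect by comparing GetArea() against 2 * M_PI (one hemisphere). A minimal standalone sketch, assuming the s2 library's public headers rather than this repo's test helpers:

#include "s2/s2latlng.h"
#include "s2/s2loop.h"
#include <vector>

static S2Point pt(double lat, double lng) {
    return S2LatLng::FromDegrees(lat, lng).ToPoint();
}

int main() {
    // CCW 20x20-degree square centered on [0,0].
    std::vector<S2Point> square{pt(10, 10), pt(10, -10), pt(-10, -10), pt(-10, 10)};
    S2Loop loop(square);
    double squareArea = loop.GetArea();      // well under 2*pi
    loop.Invert();                           // now "everything *not* in the square"
    double complementArea = loop.GetArea();  // over 2*pi, like bigPoly20Comp above
    return complementArea > squareArea ? 0 : 1;
}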
diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp
index 5b4ade3d062..97ae2533fc8 100644
--- a/src/mongo/db/geo/geometry_container.cpp
+++ b/src/mongo/db/geo/geometry_container.cpp
@@ -46,8 +46,9 @@ bool GeometryContainer::isPoint() const {
bool GeometryContainer::supportsContains() const {
return NULL != _polygon || NULL != _box || NULL != _cap || NULL != _multiPolygon ||
- (NULL != _geometryCollection && (_geometryCollection->polygons.vector().size() > 0 ||
- _geometryCollection->multiPolygons.vector().size() > 0));
+ (NULL != _geometryCollection &&
+ (_geometryCollection->polygons.vector().size() > 0 ||
+ _geometryCollection->multiPolygons.vector().size() > 0));
}
bool GeometryContainer::hasS2Region() const {
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index db9e68a0c25..3640d538df9 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -231,8 +231,7 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Secondary loops not contained by first exterior loop - "
"secondary loops must be holes: "
<< coordinateElt.toString(false)
- << " first loop: "
- << elem.Obj().firstElement().toString(false));
+ << " first loop: " << elem.Obj().firstElement().toString(false));
}
}
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index 921ba70e6d6..01eba23667a 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -434,4 +434,4 @@ TEST(GeoParser, parseGeometryCollection) {
ASSERT_TRUE(gc.supportsContains());
}
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index f74a403f77b..c5b1a043677 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -667,19 +667,13 @@ Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
if (params->bits < 1 || params->bits > 32) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but "
- << params->bits
- << " bits were specified");
+ << "but " << params->bits << " bits were specified");
}
if (params->min >= params->max) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "region for hash must be valid and have positive area, "
- << "but ["
- << params->min
- << ", "
- << params->max
- << "] "
+ << "but [" << params->min << ", " << params->max << "] "
<< "was specified");
}
@@ -774,8 +768,7 @@ GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
GeoHash GeoHashConverter::hash(double x, double y) const {
uassert(16433,
str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
- << " ]"
- << causedBy(BSON_ARRAY(x << y).toString()),
+ << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index 1681803083f..288a0895d02 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -549,4 +549,4 @@ TEST(GeoHash, ClearUnusedBitsIsNoopIfNoBitsAreUnused) {
GeoHash other = geoHash.parent(32);
ASSERT_EQUALS(geoHash, other);
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index 4b170dbc1d3..2cdb65673f8 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -332,7 +332,7 @@ void getDifferenceInternal(GeoHash cellId,
}
}
}
-}
+} // namespace
void R2CellUnion::getDifference(const R2CellUnion& cellUnion) {
std::vector<GeoHash> diffCellIds;
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index ca400eaa829..be466668110 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -64,8 +64,9 @@ inline double rad2deg(const double rad) {
inline double computeXScanDistance(double y, double maxDistDegrees) {
// TODO: this overestimates for large maxDistDegrees far from the equator
- return maxDistDegrees / std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
- cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
+ return maxDistDegrees /
+ std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
+ cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
}
bool isValidLngLat(double lng, double lat);
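A worked value for the expression reflowed above: the scan width is the search radius divided by the worst-case cos(latitude) within the band, clamped at +/-89 degrees. A standalone restatement (not repo code):

#include <algorithm>
#include <cmath>
#include <cstdio>

static double xScan(double y, double maxDistDegrees) {
    const double rad = M_PI / 180.0;
    return maxDistDegrees /
        std::min(std::cos(rad * std::min(+89.0, y + maxDistDegrees)),
                 std::cos(rad * std::max(-89.0, y - maxDistDegrees)));
}

int main() {
    // At latitude 60 with a 1-degree radius: 1 / cos(61 deg) ~ 2.06 degrees of x.
    std::printf("%f\n", xScan(60.0, 1.0));
    return 0;
}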
diff --git a/src/mongo/db/hasher.h b/src/mongo/db/hasher.h
index 20519e6a58f..a4e86a1b5aa 100644
--- a/src/mongo/db/hasher.h
+++ b/src/mongo/db/hasher.h
@@ -71,4 +71,4 @@ public:
private:
BSONElementHasher();
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index 63ec64417af..fd02d21e12c 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -272,8 +272,7 @@ TEST(BSONElementHasher, HashString) {
TEST(BSONElementHasher, HashObject) {
BSONObj o = BSON("check" << BSON("a"
<< "abc"
- << "b"
- << 123LL));
+ << "b" << 123LL));
ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
o = BSON("check" << BSONObj());
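For readers outside this tree: BSON(...) is the builder macro these tests lean on, chaining field names and values with <<. A minimal usage sketch (header paths assumed from this repo):

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj makeDoc() {
    // Builds {check: {a: "abc", b: NumberLong(123)}}, the document hashed above.
    return BSON("check" << BSON("a"
                                << "abc"
                                << "b" << 123LL));
}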
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index a86843f80c6..95dbe8fd680 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -98,9 +98,7 @@ BSONElement BtreeKeyGenerator::_extractNextElement(const BSONObj& obj,
16746,
str::stream() << "Ambiguous field name found in array (do not use numeric field names in "
"embedded elements in an array), field: '"
- << arrField.fieldName()
- << "' for array: "
- << positionalInfo.arrayObj,
+ << arrField.fieldName() << "' for array: " << positionalInfo.arrayObj,
!haveObjField || !positionalInfo.hasPositionallyIndexedElt());
*arrayNestedArray = false;
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index 3301cc3c861..da569fdb203 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -43,9 +43,9 @@
#include "mongo/util/log.h"
using namespace mongo;
-using std::unique_ptr;
using std::cout;
using std::endl;
+using std::unique_ptr;
using std::vector;
namespace {
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index e47ef01a5e6..4dc0ebbb8d9 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -193,14 +193,8 @@ void ExpressionParams::initialize2dsphereParams(const BSONObj& infoObj,
massert(17395,
stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
- << out->indexVersion
- << " }, only support versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "]",
+ << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
out->indexVersion == S2_INDEX_VERSION_1);
}
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index b118867a899..09b1b8e1c7b 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -92,8 +92,8 @@ const int TempKeyMaxSize = 1024;
// TODO SERVER-36385: Completely remove the key size check in 4.4
Status checkKeySize(const BSONObj& key) {
if (key.objsize() >= TempKeyMaxSize) {
- std::string msg = str::stream() << "Index key too large to index, failing " << key.objsize()
- << ' ' << redact(key);
+ std::string msg = str::stream()
+ << "Index key too large to index, failing " << key.objsize() << ' ' << redact(key);
return Status(ErrorCodes::KeyTooLong, msg);
}
return Status::OK();
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index f308c4251cb..fc6f6067484 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -397,8 +397,8 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// other writes making up this operation are given. When index builds can cope with
// replication rollbacks, side table writes associated with a CUD operation should
// remain/rollback along with the corresponding oplog entry.
- toInsert.emplace_back(BSON(
- "op" << (op == Op::kInsert ? "i" : "d") << "key" << key << "recordId" << loc.repr()));
+ toInsert.emplace_back(BSON("op" << (op == Op::kInsert ? "i" : "d") << "key" << key
+ << "recordId" << loc.repr()));
}
if (op == Op::kInsert) {
@@ -408,9 +408,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
for (const auto& key : multikeyMetadataKeys) {
toInsert.emplace_back(BSON("op"
<< "i"
- << "key"
- << key
- << "recordId"
+ << "key" << key << "recordId"
<< static_cast<int64_t>(
RecordId::ReservedId::kWildcardMultikeyMetadataId)));
}
@@ -421,7 +419,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// operations outside this table and in the same transaction are rolled back, this counter also
// needs to be rolled back.
opCtx->recoveryUnit()->onRollback(
- [ this, size = toInsert.size() ] { _sideWritesCounter.fetchAndSubtract(size); });
+ [this, size = toInsert.size()] { _sideWritesCounter.fetchAndSubtract(size); });
std::vector<Record> records;
for (auto& doc : toInsert) {
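The capture list adjusted above is a C++14 init-capture. A self-contained sketch of the same form, with illustrative names only:

#include <cstddef>
#include <vector>

int main() {
    std::vector<int> toInsert{1, 2, 3};
    // 'size' is created in the capture list itself, so the callback keeps a
    // copy of the count even if the vector changes before rollback fires.
    auto onRollback = [size = toInsert.size()] { return size; };
    toInsert.clear();
    return static_cast<int>(onRollback());  // still 3
}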
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 18f98cc72cf..f8afcd4f56a 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -121,9 +121,9 @@ public:
bool areAllConstraintsChecked(OperationContext* opCtx) const;
/**
- * When an index builder wants to commit, use this to retrieve any recorded multikey paths
- * that were tracked during the build.
- */
+ * When an index builder wants to commit, use this to retrieve any recorded multikey paths
+ * that were tracked during the build.
+ */
boost::optional<MultikeyPaths> getMultikeyPaths() const;
const std::string& getSideWritesTableIdent() const;
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index e400c3c0df2..f8d70834170 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -64,7 +64,7 @@ void populateOptionsMap(std::map<StringData, BSONElement>& theMap, const BSONObj
fieldName == IndexDescriptor::kSparseFieldName || // checked specially
fieldName == IndexDescriptor::kUniqueFieldName || // checked specially
fieldName == IndexDescriptor::kNamespaceFieldName // removed in 4.4
- ) {
+ ) {
continue;
}
theMap[fieldName] = e;
@@ -155,8 +155,7 @@ Status IndexDescriptor::isIndexVersionAllowedForCreation(
}
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
- << "; cannot create an index with v="
- << static_cast<int>(indexVersion)};
+ << "; cannot create an index with v=" << static_cast<int>(indexVersion)};
}
IndexVersion IndexDescriptor::getDefaultIndexVersion() {
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 731e0a14123..615838142c8 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -96,30 +96,18 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
if (!indexVersionElt.isNumber()) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
if (indexVersionElt.type() == BSONType::NumberDouble &&
!std::isnormal(indexVersionElt.numberDouble())) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
const auto indexVersion = indexVersionElt.numberLong();
@@ -127,15 +115,9 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
indexVersion != S2_INDEX_VERSION_3) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
return specObj;
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index b57a2b58c43..93fc8ac545d 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -99,8 +99,7 @@ void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths,
const MultikeyPaths& actualMultikeyPaths) {
if (expectedMultikeyPaths != actualMultikeyPaths) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
}
@@ -109,13 +108,11 @@ long long getCellID(int x, int y, bool multiPoint = false) {
if (multiPoint) {
obj = BSON("a" << BSON("type"
<< "MultiPoint"
- << "coordinates"
- << BSON_ARRAY(BSON_ARRAY(x << y))));
+ << "coordinates" << BSON_ARRAY(BSON_ARRAY(x << y))));
} else {
obj = BSON("a" << BSON("type"
<< "Point"
- << "coordinates"
- << BSON_ARRAY(x << y)));
+ << "coordinates" << BSON_ARRAY(x << y)));
}
BSONObj keyPattern = fromjson("{a: '2dsphere'}");
BSONObj infoObj = fromjson("{key: {a: '2dsphere'}, '2dsphereIndexVersion': 3}");
@@ -244,8 +241,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)));
+ << "" << getCellID(0, 0)));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
@@ -267,9 +263,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)
- << ""
+ << "" << getCellID(0, 0) << ""
<< "2gnirts"));
assertKeysetsEqual(expectedKeys, actualKeys);
@@ -389,8 +383,9 @@ TEST(S2KeyGeneratorTest, CollationAppliedToStringsInNestedObjects) {
ExpressionKeysPrivate::getS2Keys(obj, keyPattern, params, &actualKeys, &actualMultikeyPaths);
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
- expectedKeys.insert(BSON("" << getCellID(0, 0) << "" << BSON("c"
- << "gnirts")));
+ expectedKeys.insert(BSON("" << getCellID(0, 0) << ""
+ << BSON("c"
+ << "gnirts")));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index 1ec25d713a8..485a6a3d0a5 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -147,8 +147,7 @@ DEATH_TEST(SortKeyGeneratorTest,
MONGO_COMPILER_VARIABLE_UNUSED auto ignored =
stdx::make_unique<SortKeyGenerator>(BSON("a" << BSON("$meta"
<< "textScore"
- << "extra"
- << 1)),
+ << "extra" << 1)),
nullptr);
}
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index ba2fc769a25..11eeeea971c 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -114,4 +114,4 @@ private:
std::string _name; // name of this builder, not related to the index
static AtomicWord<unsigned> _indexBuildCount;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index bb4d2ae44c4..fb136350b84 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -85,8 +85,7 @@ void checkShardKeyRestrictions(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -163,9 +162,9 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
for (auto& spec : specs) {
std::string name = spec.getStringField(IndexDescriptor::kIndexNameFieldName);
if (name.empty()) {
- return Status(
- ErrorCodes::CannotCreateIndex,
- str::stream() << "Cannot create an index for a spec '" << spec
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream()
+ << "Cannot create an index for a spec '" << spec
<< "' without a non-empty string value for the 'name' field");
}
indexNames.push_back(name);
@@ -378,8 +377,7 @@ void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
stdx::unique_lock<stdx::mutex> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
- << _allIndexBuilds.size()
- << " index builds running.",
+ << _allIndexBuilds.size() << " index builds running.",
_allIndexBuilds.size() == 0);
}
@@ -494,12 +492,11 @@ Status IndexBuildsCoordinator::_registerIndexBuild(
auto registeredIndexBuilds =
collIndexBuildsIt->second->getIndexBuildState(lk, name);
return Status(ErrorCodes::IndexBuildAlreadyInProgress,
- str::stream() << "There's already an index with name '" << name
- << "' being built on the collection: "
- << " ( "
- << replIndexBuildState->collectionUUID
- << " ). Index build: "
- << registeredIndexBuilds->buildUUID);
+ str::stream()
+ << "There's already an index with name '" << name
+ << "' being built on the collection: "
+ << " ( " << replIndexBuildState->collectionUUID
+ << " ). Index build: " << registeredIndexBuilds->buildUUID);
}
}
}
@@ -844,8 +841,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
}
fassert(51101,
status.withContext(str::stream() << "Index build: " << replState->buildUUID
- << "; Database: "
- << replState->dbName));
+ << "; Database: " << replState->dbName));
}
uassertStatusOK(status);
@@ -925,21 +921,13 @@ void IndexBuildsCoordinator::_buildIndex(OperationContext* opCtx,
invariant(db,
str::stream() << "Database not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
invariant(db->getCollection(opCtx, nss),
str::stream() << "Collection not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
// Perform the third and final drain after releasing a shared lock and reacquiring an
// exclusive lock on the database.
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index b0bea23afa9..470f6fe3d27 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -245,8 +245,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
return Status(ErrorCodes::IndexNotFound,
str::stream()
<< "Cannot set a new commit quorum on an index build in collection '"
- << nss
- << "' without providing any indexes.");
+ << nss << "' without providing any indexes.");
}
AutoGetCollectionForRead autoColl(opCtx, nss);
@@ -280,10 +279,9 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
buildState->indexNames.begin(), buildState->indexNames.end(), indexNames.begin());
if (buildState->indexNames.size() != indexNames.size() || !equal) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Provided indexes are not all being "
- << "built by the same index builder in collection '"
- << nss
- << "'.");
+ str::stream()
+ << "Provided indexes are not all being "
+ << "built by the same index builder in collection '" << nss << "'.");
}
// See if the new commit quorum is satisfiable.
diff --git a/src/mongo/db/index_builds_coordinator_mongod_test.cpp b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
index 5dd6938f730..75e076c39ec 100644
--- a/src/mongo/db/index_builds_coordinator_mongod_test.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
@@ -96,8 +96,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 480016eed12..05c90bb7a65 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -213,8 +213,8 @@ MONGO_INITIALIZER_GENERAL(
("default"))
(InitializerContext*) {
using logger::LogManager;
- using logger::MessageEventEphemeral;
using logger::MessageEventDetailsEncoder;
+ using logger::MessageEventEphemeral;
using logger::MessageEventWithContextEncoder;
using logger::MessageLogDomain;
using logger::RotatableFileAppender;
@@ -254,8 +254,8 @@ MONGO_INITIALIZER_GENERAL(
exists = boost::filesystem::exists(absoluteLogpath);
} catch (boost::filesystem::filesystem_error& e) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed probe for \"" << absoluteLogpath << "\": "
- << e.code().message());
+ str::stream() << "Failed probe for \"" << absoluteLogpath
+ << "\": " << e.code().message());
}
if (exists) {
@@ -276,9 +276,7 @@ MONGO_INITIALIZER_GENERAL(
return Status(ErrorCodes::FileRenameFailed,
str::stream()
<< "Could not rename preexisting log file \""
- << absoluteLogpath
- << "\" to \""
- << renameTarget
+ << absoluteLogpath << "\" to \"" << renameTarget
<< "\"; run with --logappend or manually remove file: "
<< ec.message());
}
diff --git a/src/mongo/db/initialize_server_security_state.cpp b/src/mongo/db/initialize_server_security_state.cpp
index b5d660869c4..cb9c29b63bd 100644
--- a/src/mongo/db/initialize_server_security_state.cpp
+++ b/src/mongo/db/initialize_server_security_state.cpp
@@ -64,9 +64,7 @@ bool initializeServerSecurityGlobalState(ServiceContext* service) {
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
auth::setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509"
- << saslCommandUserDBFieldName
- << "$external"
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName.toString()));
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 425d100f183..ab0b749c51e 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
namespace {
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 67fd8b08460..43a46da79de 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -96,8 +96,7 @@ BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusi
BSONElement patElt = pat.next();
massert(16634,
str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern "
- << _pattern,
+ << " do not match those of keyPattern " << _pattern,
srcElt.fieldNameStringData() == patElt.fieldNameStringData());
newBound.append(srcElt);
}
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 45fef6a9e6f..fbb7c4e7af6 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -142,4 +142,4 @@ TEST(KeyPattern, GlobalMinMax) {
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
}
-}
+} // namespace
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index 20e3273af35..c97697aea41 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -106,10 +106,8 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
return {ErrorCodes::KeyNotFound,
str::stream() << "Cache Reader No keys found for " << _purpose
- << " that is valid for time: "
- << forThisTime.toString()
- << " with id: "
- << keyId};
+ << " that is valid for time: " << forThisTime.toString()
+ << " with id: " << keyId};
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
diff --git a/src/mongo/db/keys_collection_client.h b/src/mongo/db/keys_collection_client.h
index 54ac6fedc44..debff147f53 100644
--- a/src/mongo/db/keys_collection_client.h
+++ b/src/mongo/db/keys_collection_client.h
@@ -56,8 +56,8 @@ public:
bool useMajority) = 0;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document into storage
+ */
virtual Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) = 0;
/**
diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h
index 9ad5dbb7490..6e96d8e94ed 100644
--- a/src/mongo/db/keys_collection_client_direct.h
+++ b/src/mongo/db/keys_collection_client_direct.h
@@ -55,8 +55,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document into storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
/**
diff --git a/src/mongo/db/keys_collection_client_sharded.h b/src/mongo/db/keys_collection_client_sharded.h
index eabd0f2051d..111948e0139 100644
--- a/src/mongo/db/keys_collection_client_sharded.h
+++ b/src/mongo/db/keys_collection_client_sharded.h
@@ -49,8 +49,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document into storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
bool supportsMajorityReads() const final {
diff --git a/src/mongo/db/log_process_details.cpp b/src/mongo/db/log_process_details.cpp
index 9435fc24485..8f7bd8cf5ba 100644
--- a/src/mongo/db/log_process_details.cpp
+++ b/src/mongo/db/log_process_details.cpp
@@ -82,4 +82,4 @@ void logProcessDetailsForLogRotate(ServiceContext* serviceContext) {
logProcessDetails();
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index fbd87f49421..415566094d2 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -50,7 +50,7 @@ bool lessThanOrEqualToMaxPossibleTime(LogicalTime time, uint64_t nTicks) {
return time.asTimestamp().getSecs() <= LogicalClock::kMaxSignedInt &&
time.asTimestamp().getInc() <= (LogicalClock::kMaxSignedInt - nTicks);
}
-}
+} // namespace
LogicalTime LogicalClock::getClusterTimeForReplicaSet(OperationContext* opCtx) {
if (getGlobalReplSettings().usingReplSets()) {
@@ -166,8 +166,7 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
return Status(ErrorCodes::ClusterTimeFailsRateLimiter,
str::stream() << "New cluster time, " << newTimeSecs
<< ", is too far from this node's wall clock time, "
- << wallClockSecs
- << ".");
+ << wallClockSecs << ".");
}
uassert(40484,
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index d0f41415e8c..b604d776903 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -349,8 +349,9 @@ TEST_F(LogicalSessionCacheTest, RefreshMatrixSessionState) {
failText << " session case failed: ";
ASSERT(sessions()->has(ids[i]) == testCases[i].inCollection)
- << failText.str() << (testCases[i].inCollection ? "session wasn't in collection"
- : "session was in collection");
+ << failText.str()
+ << (testCases[i].inCollection ? "session wasn't in collection"
+ : "session was in collection");
ASSERT((service()->matchKilled(ids[i]) != nullptr) == testCases[i].killed)
<< failText.str()
<< (testCases[i].killed ? "session wasn't killed" : "session was killed");
diff --git a/src/mongo/db/logical_session_id_test.cpp b/src/mongo/db/logical_session_id_test.cpp
index 160e718201d..4e679b3639e 100644
--- a/src/mongo/db/logical_session_id_test.cpp
+++ b/src/mongo/db/logical_session_id_test.cpp
@@ -284,14 +284,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransact
LogicalSessionFromClient lsid;
lsid.setId(UUID::gen());
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- true);
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ true);
ASSERT(_opCtx->getLogicalSessionId());
ASSERT_EQ(lsid.getId(), _opCtx->getLogicalSessionId()->getId());
@@ -306,14 +306,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IsReplSetMemberOrMon
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- false,
- true),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ false,
+ true),
AssertionException,
ErrorCodes::IllegalOperation);
}
@@ -324,14 +324,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SupportsDocLockingFa
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- false),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ false),
AssertionException,
ErrorCodes::IllegalOperation);
}
diff --git a/src/mongo/db/logical_time_test.cpp b/src/mongo/db/logical_time_test.cpp
index a03497b416f..19c3d5832b5 100644
--- a/src/mongo/db/logical_time_test.cpp
+++ b/src/mongo/db/logical_time_test.cpp
@@ -28,8 +28,8 @@
*/
-#include "mongo/db/logical_time.h"
#include "mongo/bson/timestamp.h"
+#include "mongo/db/logical_time.h"
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
#include "mongo/platform/basic.h"
@@ -119,10 +119,10 @@ TEST(LogicalTime, appendAsOperationTime) {
}
TEST(LogicalTime, fromOperationTime) {
- const auto actualTime = LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
- << "Value"
- << "operationTime"
- << Timestamp(1)));
+ const auto actualTime =
+ LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
+ << "Value"
+ << "operationTime" << Timestamp(1)));
ASSERT_EQ(LogicalTime(Timestamp(1)), actualTime);
}
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 364ebdd68d7..649eb1a6e77 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -95,4 +95,4 @@ void MatchExpression::addDependencies(DepsTracker* deps) const {
_doAddDependencies(deps);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index e3e3d652b4a..de5878c115c 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -365,4 +365,4 @@ private:
MatchType _matchType;
std::unique_ptr<TagData> _tagData;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index af7205ef822..51a06e2b81c 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -238,4 +238,4 @@ bool SizeMatchExpression::equivalent(const MatchExpression* other) const {
// ------------------
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 471549f105e..959d536632f 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -117,8 +117,8 @@ private:
class ElemMatchValueMatchExpression : public ArrayMatchingMatchExpression {
public:
/**
- * This constructor takes ownership of 'sub.'
- */
+ * This constructor takes ownership of 'sub'.
+ */
ElemMatchValueMatchExpression(StringData path, MatchExpression* sub);
explicit ElemMatchValueMatchExpression(StringData path);
virtual ~ElemMatchValueMatchExpression();
@@ -207,4 +207,4 @@ private:
int _size; // >= 0: a real size to match; < 0: nothing will match
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 99081c3e910..f52f331989e 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -132,8 +132,8 @@ Status GeoExpression::parseFrom(const BSONObj& obj) {
if (GeoExpression::INTERSECT == predicate) {
if (!geoContainer->supportsProject(SPHERE)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "$geoIntersect not supported with provided geometry: "
- << obj);
+ str::stream()
+ << "$geoIntersect not supported with provided geometry: " << obj);
}
geoContainer->projectInto(SPHERE);
}
@@ -218,8 +218,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "geo near accepts just one argument when querying for a GeoJSON "
- << "point. Extra field found: "
- << objIt.next());
+ << "point. Extra field found: " << objIt.next());
}
// Parse "new" near:
@@ -247,9 +246,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "invalid point in geo near query $geometry argument: "
- << embeddedObj
- << " "
- << status.reason());
+ << embeddedObj << " " << status.reason());
}
uassert(16681,
"$near requires geojson point, given " + embeddedObj.toString(),
@@ -326,16 +323,16 @@ Status GeoNearExpression::parseFrom(const BSONObj& obj) {
//
/**
-* Takes ownership of the passed-in GeoExpression.
-*/
+ * Takes ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
const GeoExpression* query,
const BSONObj& rawObj)
: LeafMatchExpression(GEO, path), _rawObj(rawObj), _query(query), _canSkipValidation(false) {}
/**
-* Takes shared ownership of the passed-in GeoExpression.
-*/
+ * Takes shared ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
std::shared_ptr<const GeoExpression> query,
const BSONObj& rawObj)
@@ -467,4 +464,4 @@ std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
}
return std::move(next);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 56ddaa674ea..6bf40daf87a 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -181,4 +181,4 @@ TEST(ExpressionGeoTest, GeoNearNotEquivalent) {
gne2(makeGeoNearMatchExpression(query2));
ASSERT(!gne1->equivalent(gne2.get()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 334dd0201e7..67b67dad77d 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -806,4 +806,4 @@ bool BitTestMatchExpression::equivalent(const MatchExpression* other) const {
return path() == realOther->path() && myBitPositions == otherBitPositions;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 931ef828d03..84d04c63a5c 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -40,7 +40,7 @@
namespace pcrecpp {
class RE;
-} // namespace pcrecpp;
+} // namespace pcrecpp
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index bb06d26b7f7..cefa46d58b7 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -1861,4 +1861,4 @@ TEST(BitTestMatchExpression, DoesNotMatchBinaryWithBitMask) {
ASSERT(banyc.matchesSingleElement(match1["a"]));
ASSERT(banyc.matchesSingleElement(match2["a"]));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index ede09820502..98a64e2d4bd 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -255,8 +255,8 @@ StatusWithMatchExpression parse(const BSONObj& obj,
if (!parseExpressionMatchFunction) {
return {Status(ErrorCodes::BadValue,
- str::stream() << "unknown top level operator: "
- << e.fieldNameStringData())};
+ str::stream()
+ << "unknown top level operator: " << e.fieldNameStringData())};
}
auto parsedExpression = parseExpressionMatchFunction(
@@ -569,8 +569,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
// This checks if e is integral.
@@ -589,8 +588,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
}
@@ -635,9 +633,9 @@ StatusWithMatchExpression parseBitTest(StringData name, BSONElement e) {
auto eBinary = e.binData(eBinaryLen);
bitTestMatchExpression = stdx::make_unique<T>(name, eBinary, eBinaryLen);
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << name << " takes an Array, a number, or a BinData but received: " << e);
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << name << " takes an Array, a number, or a BinData but received: " << e);
}
return {std::move(bitTestMatchExpression)};
@@ -692,8 +690,7 @@ StatusWithMatchExpression parseInternalSchemaRootDocEq(
if (elem.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
str::stream() << InternalSchemaRootDocEqMatchExpression::kName
- << " must be an object, found type "
- << elem.type())};
+ << " must be an object, found type " << elem.type())};
}
auto rootDocEq =
stdx::make_unique<InternalSchemaRootDocEqMatchExpression>(elem.embeddedObject());
@@ -750,8 +747,7 @@ StatusWith<StringData> parseNamePlaceholder(const BSONObj& containingObject,
} else if (namePlaceholderElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
str::stream() << expressionName << " requires '" << namePlaceholderFieldName
- << "' to be a string, not "
- << namePlaceholderElem.type()};
+ << "' to be a string, not " << namePlaceholderElem.type()};
}
return {namePlaceholderElem.valueStringData()};
}
@@ -803,12 +799,9 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> parseExprWithPlaceholder(
if (placeholder && (*placeholder != expectedPlaceholder)) {
return {ErrorCodes::FailedToParse,
str::stream() << expressionName << " expected a name placeholder of "
- << expectedPlaceholder
- << ", but '"
+ << expectedPlaceholder << ", but '"
<< exprWithPlaceholderElem.fieldNameStringData()
- << "' has a mismatching placeholder '"
- << *placeholder
- << "'"};
+ << "' has a mismatching placeholder '" << *placeholder << "'"};
}
return result;
}
@@ -1248,8 +1241,7 @@ StatusWithMatchExpression parseInternalSchemaFixedArityArgument(
if (static_cast<size_t>(inputObj.nFields()) != arity) {
return {ErrorCodes::FailedToParse,
str::stream() << elem.fieldNameStringData() << " requires exactly " << arity
- << " MatchExpressions, but got "
- << inputObj.nFields()};
+ << " MatchExpressions, but got " << inputObj.nFields()};
}
// Fill out 'expressions' with all of the parsed subexpressions contained in the array,
@@ -1320,17 +1312,16 @@ StatusWithMatchExpression parseInternalSchemaBinDataSubType(StringData name, BSO
auto valueAsInt = e.parseIntegerElementToInt();
if (!valueAsInt.isOK()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Invalid numerical BinData subtype value for "
- << InternalSchemaBinDataSubTypeExpression::kName
- << ": "
- << e.number());
+ str::stream()
+ << "Invalid numerical BinData subtype value for "
+ << InternalSchemaBinDataSubTypeExpression::kName << ": " << e.number());
}
if (!isValidBinDataType(valueAsInt.getValue())) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << InternalSchemaBinDataSubTypeExpression::kName
- << " value must represent BinData subtype: "
- << valueAsInt.getValue());
+ str::stream()
+ << InternalSchemaBinDataSubTypeExpression::kName
+ << " value must represent BinData subtype: " << valueAsInt.getValue());
}
return {stdx::make_unique<InternalSchemaBinDataSubTypeExpression>(
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 534b20f3a1e..8ead6ff5d2b 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -198,16 +198,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
@@ -224,16 +220,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
@@ -251,17 +243,11 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -273,14 +259,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Query with DBRef fields out of order.
@@ -288,22 +270,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
BSONObj matchOutOfOrder = BSON("$db"
<< "db"
- << "$id"
- << oid
- << "$ref"
+ << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
@@ -322,19 +298,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -346,14 +316,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $id missing.
@@ -361,20 +327,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingID = BSON("$ref"
<< "coll"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "collx"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -386,14 +345,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $ref missing.
@@ -401,18 +356,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -424,14 +373,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $db only.
@@ -439,24 +384,17 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj matchDBOnly = BSON("$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -468,16 +406,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "$db"
- << "db"
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"
+ << "foo" << 12345 << "bar" << 678)))));
}
TEST(MatchExpressionParserArrayTest, All1) {
@@ -843,4 +777,4 @@ TEST(MatchExpressionParserArrayTest, AllStringCollation) {
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
ASSERT_TRUE(eqMatch->getCollator() == &collator);
}
-}
+} // namespace mongo
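The reflowed hunks above are mechanical: MongoDB's BSON() macro builds a document from one `<<` chain in which field names and values simply alternate, so the formatter is free to pack short operands onto a single line while long string literals still break. A self-contained sketch of that alternation idiom (a toy builder of my own, not the real macro, which produces a BSONObj):

    #include <iostream>
    #include <string>

    // Toy stream builder: even-numbered operands are field names, odd ones
    // are values, mirroring how BSON("$ref" << "coll" << "$id" << 42) reads.
    class MiniBuilder {
    public:
        MiniBuilder& operator<<(const std::string& s) {
            append(s);
            return *this;
        }
        MiniBuilder& operator<<(int v) {
            append(std::to_string(v));
            return *this;
        }
        std::string str() const {
            return "{" + _out + "}";
        }

    private:
        void append(const std::string& tok) {
            // Prefix each new field name with ", "; values attach directly.
            _out += _isName ? (_out.empty() ? "" : ", ") + tok + ": " : tok;
            _isName = !_isName;
        }
        std::string _out;
        bool _isName = true;
    };

    int main() {
        MiniBuilder b;
        b << "$ref" << "coll" << "$id" << 42 << "foo" << 12345;
        std::cout << b.str() << "\n";  // prints {$ref: coll, $id: 42, foo: 12345}
    }

Whether the formatter breaks after a `<<` is purely line-width driven, which is why the quoted strings above keep their own lines while `<< "$id" << oid << "foo" << 12345` now packs together.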
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index acf3ec3742b..ad76f13ff92 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -435,9 +435,7 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -446,15 +444,11 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -470,39 +464,28 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
<< "db"
<< "$ref"
<< "coll"
- << "$id"
- << oid))));
+ << "$id" << oid))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
}
@@ -511,15 +494,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidy = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -528,15 +507,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -544,15 +519,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
<< "coll"
@@ -560,9 +531,7 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
<< "colly"
@@ -570,87 +539,59 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
}
@@ -658,10 +599,7 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345))));
+ << "$id" << oid << "foo" << 12345))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
@@ -669,28 +607,19 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "collx"
- << "$id"
- << oidx
- << "foo"
- << 12345)
- << BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "foo" << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
}
TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
@@ -704,8 +633,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// second field is not $id
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$foo"
- << 1))));
+ << "$foo" << 1))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
@@ -719,8 +647,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// missing $id and $ref field
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
<< "test"
- << "foo"
- << 3))));
+ << "foo" << 3))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
}
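For reference while reading the INInvalidDBRefs hunks just above: the shape these tests pin down is positional — $ref first, $id second, then an optional $db, and no other $-prefixed fields afterwards. A small sketch of that rule as I read it from the test expectations (my illustration, not the parser's actual implementation):

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    using Field = std::pair<std::string, std::string>;  // name, printable value

    // Returns true when a sub-document is shaped like a DBRef.
    bool looksLikeValidDBRef(const std::vector<Field>& doc) {
        if (doc.size() < 2 || doc[0].first != "$ref" || doc[1].first != "$id")
            return false;
        std::size_t i = 2;
        if (i < doc.size() && doc[i].first == "$db")
            ++i;
        for (; i < doc.size(); ++i)
            if (!doc[i].first.empty() && doc[i].first[0] == '$')
                return false;  // extra $-fields are rejected
        return true;
    }

    int main() {
        assert(looksLikeValidDBRef({{"$ref", "coll"}, {"$id", "oid"}, {"foo", "12345"}}));
        assert(!looksLikeValidDBRef({{"$ref", "coll"}, {"$foo", "1"}}));  // second field not $id
        assert(!looksLikeValidDBRef({{"$db", "test"}, {"foo", "3"}}));    // $id and $ref missing
    }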
diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp
index 17e77fa2522..e60bd62ccc0 100644
--- a/src/mongo/db/matcher/expression_parser_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_test.cpp
@@ -258,8 +258,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsNotInline) {
TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "i")));
+ << "$regex" << BSONRegEx("/myRegex/", "i")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_NOT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -267,8 +266,7 @@ TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -276,8 +274,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirstEmptyOptions) {
auto query = BSON("a" << BSON("$options"
<< ""
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
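The three regex hunks above encode one rule: $options and $regex are collected across the whole predicate object, so their order does not matter, but flags supplied both inline on the regex and through $options conflict. A hedged paraphrase of that resolution step (illustrative only, not the parser's real code):

    #include <optional>
    #include <string>

    struct RegexSpec {
        std::string pattern;
        std::string inlineFlags;  // flags attached to the BSONRegEx literal itself
    };

    // Returns the effective flags, or std::nullopt when flags were given twice.
    std::optional<std::string> resolveFlags(const RegexSpec& re,
                                            const std::optional<std::string>& options) {
        if (options && !re.inlineFlags.empty())
            return std::nullopt;  // {$options: "s", $regex: /../i} -> parse error
        return options ? *options : re.inlineFlags;
    }

    int main() {
        auto ok = resolveFlags({"/myRegex/", ""}, std::string("s"));    // parses, flags "s"
        auto bad = resolveFlags({"/myRegex/", "i"}, std::string("s"));  // rejected
        return (ok && !bad) ? 0 : 1;
    }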
diff --git a/src/mongo/db/matcher/expression_parser_tree_test.cpp b/src/mongo/db/matcher/expression_parser_tree_test.cpp
index 0cc3a23f06a..9aa066b7cca 100644
--- a/src/mongo/db/matcher/expression_parser_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_tree_test.cpp
@@ -116,4 +116,4 @@ TEST(MatchExpressionParserLeafTest, NotRegex1) {
ASSERT(result.getValue()->matchesBSON(BSON("x"
<< "AC")));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 8fdcd65befc..3bba2e76a02 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -62,16 +62,14 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx,
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
db);
Collection* collection = db->getCollection(opCtx, nss);
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
collection);
std::vector<const IndexDescriptor*> idxMatches;
diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp
index ea3fa147de2..08f2ade599f 100644
--- a/src/mongo/db/matcher/expression_text_base.cpp
+++ b/src/mongo/db/matcher/expression_text_base.cpp
@@ -60,10 +60,8 @@ void TextMatchExpressionBase::serialize(BSONObjBuilder* out) const {
const fts::FTSQuery& ftsQuery = getFTSQuery();
out->append("$text",
BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage()
- << "$caseSensitive"
- << ftsQuery.getCaseSensitive()
- << "$diacriticSensitive"
- << ftsQuery.getDiacriticSensitive()));
+ << "$caseSensitive" << ftsQuery.getCaseSensitive()
+ << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive()));
}
bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 2cbdb1886f0..99d1886ab3c 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -419,4 +419,4 @@ MatchExpression::ExpressionOptimizerFunc NotMatchExpression::getOptimizer() cons
return expression;
};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index b89efb19461..8f9d66108a9 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -248,4 +248,4 @@ private:
std::unique_ptr<MatchExpression> _exp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index a0770c2a4df..cb79bb139ac 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -321,4 +321,4 @@ TEST(NorOp, Equivalent) {
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_type_test.cpp b/src/mongo/db/matcher/expression_type_test.cpp
index 403a60ee9f4..89c3795b6a2 100644
--- a/src/mongo/db/matcher/expression_type_test.cpp
+++ b/src/mongo/db/matcher/expression_type_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/matcher/expression_type.h"
#include "mongo/bson/json.h"
+#include "mongo/db/matcher/expression_type.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -314,4 +314,4 @@ TEST(InternalSchemaBinDataEncryptedTypeTest, DoesNotTraverseLeafArrays) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 997b673c6cc..c5a19e2f881 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -45,9 +45,9 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using stdx::make_unique;
WhereMatchExpression::WhereMatchExpression(OperationContext* opCtx,
@@ -110,4 +110,4 @@ unique_ptr<MatchExpression> WhereMatchExpression::shallowClone() const {
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp
index bd1469036e4..5668e0ad661 100644
--- a/src/mongo/db/matcher/expression_where_noop.cpp
+++ b/src/mongo/db/matcher/expression_where_noop.cpp
@@ -53,4 +53,4 @@ std::unique_ptr<MatchExpression> WhereNoOpMatchExpression::shallowClone() const
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_with_placeholder.cpp b/src/mongo/db/matcher/expression_with_placeholder.cpp
index b0e1f1d118d..d0b2e65eb9e 100644
--- a/src/mongo/db/matcher/expression_with_placeholder.cpp
+++ b/src/mongo/db/matcher/expression_with_placeholder.cpp
@@ -65,11 +65,9 @@ StatusWith<boost::optional<StringData>> parseTopLevelFieldName(MatchExpression*
if (statusWithId.getValue() && placeholder != statusWithId.getValue()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected a single top-level field name, found '"
- << *placeholder
- << "' and '"
- << *statusWithId.getValue()
- << "'");
+ str::stream()
+ << "Expected a single top-level field name, found '"
+ << *placeholder << "' and '" << *statusWithId.getValue() << "'");
}
}
return placeholder;
@@ -105,8 +103,7 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> ExpressionWithPlaceholder
return Status(ErrorCodes::BadValue,
str::stream() << "The top-level field name must be an alphanumeric "
"string beginning with a lowercase letter, found '"
- << *placeholder
- << "'");
+ << *placeholder << "'");
}
}
diff --git a/src/mongo/db/matcher/match_details.cpp b/src/mongo/db/matcher/match_details.cpp
index 734ba6165e2..be9c657c3d9 100644
--- a/src/mongo/db/matcher/match_details.cpp
+++ b/src/mongo/db/matcher/match_details.cpp
@@ -68,4 +68,4 @@ string MatchDetails::toString() const {
ss << "elemMatchKey: " << (_elemMatchKey ? _elemMatchKey->c_str() : "NONE") << " ";
return ss.str();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/match_details.h b/src/mongo/db/matcher/match_details.h
index 9b364b34130..aadb5552b9f 100644
--- a/src/mongo/db/matcher/match_details.h
+++ b/src/mongo/db/matcher/match_details.h
@@ -77,4 +77,4 @@ private:
bool _elemMatchKeyRequested;
std::unique_ptr<std::string> _elemMatchKey;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index be404399189..5c5bfa55fd3 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -38,4 +38,4 @@ BSONMatchableDocument::BSONMatchableDocument(const BSONObj& obj) : _obj(obj) {
}
BSONMatchableDocument::~BSONMatchableDocument() {}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.h b/src/mongo/db/matcher/matchable.h
index b0e7a601b89..062a3f28826 100644
--- a/src/mongo/db/matcher/matchable.h
+++ b/src/mongo/db/matcher/matchable.h
@@ -48,7 +48,7 @@ public:
     * The newly returned ElementIterator is allowed to keep a pointer to path.
* So the caller of this function should make sure path is in scope until
* the ElementIterator is deallocated
- */
+ */
virtual ElementIterator* allocateIterator(const ElementPath* path) const = 0;
virtual void releaseIterator(ElementIterator* iterator) const = 0;
@@ -148,4 +148,4 @@ private:
mutable BSONElementIterator _iterator;
mutable bool _iteratorUsed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index efd2c69aa06..1f6c8565d78 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -359,4 +359,4 @@ ElementIterator::Context BSONElementIterator::next() {
_next.reset();
return x;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.h b/src/mongo/db/matcher/path.h
index 1c0500e104f..88d759462f5 100644
--- a/src/mongo/db/matcher/path.h
+++ b/src/mongo/db/matcher/path.h
@@ -260,4 +260,4 @@ private:
std::unique_ptr<ElementIterator> _subCursor;
std::unique_ptr<ElementPath> _subCursorPath;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path_accepting_keyword_test.cpp b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
index 2a69e76afcf..52b42e5d959 100644
--- a/src/mongo/db/matcher/path_accepting_keyword_test.cpp
+++ b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
@@ -49,33 +49,42 @@ TEST(PathAcceptingKeyword, CanParseKnownMatchTypes) {
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$in" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_EQUAL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$ne" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::SIZE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$size" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::SIZE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$size" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::ALL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$all" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_IN ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$nin" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::EXISTS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$exists" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::EXISTS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$exists" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::MOD ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$mod" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::TYPE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$type" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::REGEX == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$regex" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::OPTIONS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$options" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::TYPE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$type" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::REGEX ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$regex" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::OPTIONS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$options" << 1).firstElement()));
ASSERT_TRUE(
PathAcceptingKeyword::ELEM_MATCH ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$elemMatch" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$near" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoNear" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$within" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoWithin" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$near" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoNear" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$within" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoWithin" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::GEO_INTERSECTS ==
MatchExpressionParser::parsePathAcceptingKeyword(
BSON("$geoIntersects" << 1).firstElement()));
diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp
index af7856d366a..dd0d7314ca9 100644
--- a/src/mongo/db/matcher/path_test.cpp
+++ b/src/mongo/db/matcher/path_test.cpp
@@ -566,4 +566,4 @@ TEST(SingleElementElementIterator, Simple1) {
ASSERT(!i.more());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
index 69b524f18f4..268bb97376e 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm <= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm <= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
index f3128007500..b0a7953f42d 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm >= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm >= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
index 13bc5c47f1c..8eb9332aed7 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
@@ -80,8 +80,7 @@ TEST(InternalSchemaObjectMatchExpression, AcceptsObjectsThatMatch) {
<< "string"))));
ASSERT_TRUE(objMatch.matchesBSON(BSON("a" << BSON("b"
<< "string"
- << "c"
- << 1))));
+ << "c" << 1))));
ASSERT_FALSE(
objMatch.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1) << BSON("b"
<< "string")))));
diff --git a/src/mongo/db/matcher/schema/json_pointer_test.cpp b/src/mongo/db/matcher/schema/json_pointer_test.cpp
index aed92b17784..f91d8888719 100644
--- a/src/mongo/db/matcher/schema/json_pointer_test.cpp
+++ b/src/mongo/db/matcher/schema/json_pointer_test.cpp
@@ -49,9 +49,8 @@ void assertPointerEvaluatesTo(std::string pointerStr,
}
TEST(JSONPointerTest, ParseInterestingCharacterFields) {
- BSONObj obj = BSON(
- "" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l" << 6 << " "
- << 7);
+ BSONObj obj = BSON("" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l"
+ << 6 << " " << 7);
assertPointerEvaluatesTo("/", obj, "", 1);
assertPointerEvaluatesTo("/c%d", obj, "c%d", 2);
assertPointerEvaluatesTo("/e^f", obj, "e^f", 3);
@@ -129,9 +128,8 @@ TEST(JSONPointerTest, ArrayTraversalTest) {
<< "value2")
<< BSON("builder3"
<< "value3"));
- auto topLevel =
- BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop << "toBSONArray"
- << bsonArray);
+ auto topLevel = BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop
+ << "toBSONArray" << bsonArray);
assertPointerEvaluatesTo("/transit/arrBottom/0", topLevel, "0", 0);
assertPointerEvaluatesTo("/toBSONArray/0/builder0", topLevel, "builder0", "value0");
assertPointerEvaluatesTo("/toBSONArray/3/builder3", topLevel, "builder3", "value3");
diff --git a/src/mongo/db/matcher/schema/json_schema_parser.cpp b/src/mongo/db/matcher/schema/json_schema_parser.cpp
index 6f0d6e6c947..72ed89bf3f6 100644
--- a/src/mongo/db/matcher/schema/json_schema_parser.cpp
+++ b/src/mongo/db/matcher/schema/json_schema_parser.cpp
@@ -71,7 +71,12 @@ namespace {
// Explicitly unsupported JSON Schema keywords.
const std::set<StringData> unsupportedKeywords{
- "$ref"_sd, "$schema"_sd, "default"_sd, "definitions"_sd, "format"_sd, "id"_sd,
+ "$ref"_sd,
+ "$schema"_sd,
+ "default"_sd,
+ "definitions"_sd,
+ "format"_sd,
+ "id"_sd,
};
constexpr StringData kNamePlaceholder = "i"_sd;
@@ -173,9 +178,9 @@ StatusWithMatchExpression parseMaximum(StringData path,
bool isExclusiveMaximum) {
if (!maximum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMaximumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -201,9 +206,9 @@ StatusWithMatchExpression parseMinimum(StringData path,
bool isExclusiveMinimum) {
if (!minimum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMinimumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -249,9 +254,9 @@ StatusWithMatchExpression parsePattern(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (pattern.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternKeyword
- << "' must be a string")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPatternKeyword
+ << "' must be a string")};
}
if (path.empty()) {
@@ -271,16 +276,16 @@ StatusWithMatchExpression parseMultipleOf(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (!multipleOf.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must be a number")};
}
if (multipleOf.numberDecimal().isNegative() || multipleOf.numberDecimal().isZero()) {
return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must have a positive value")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must have a positive value")};
}
if (path.empty()) {
return {stdx::make_unique<AlwaysTrueMatchExpression>()};
@@ -405,7 +410,7 @@ StatusWith<StringDataSet> parseRequired(BSONElement requiredElt) {
<< propertyName.type()};
}
- const auto[it, didInsert] = properties.insert(propertyName.valueStringData());
+ const auto [it, didInsert] = properties.insert(propertyName.valueStringData());
if (!didInsert) {
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '"
@@ -458,9 +463,9 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
bool ignoreUnknownKeywords) {
if (propertiesElt.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPropertiesKeyword
- << "' must be an object")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPropertiesKeyword
+ << "' must be an object")};
}
auto propertiesObj = propertiesElt.embeddedObject();
@@ -469,8 +474,7 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
if (property.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Nested schema for $jsonSchema property '"
- << property.fieldNameStringData()
- << "' must be an object"};
+ << property.fieldNameStringData() << "' must be an object"};
}
auto nestedSchemaMatch = _parse(expCtx,
@@ -532,11 +536,11 @@ StatusWith<std::vector<PatternSchema>> parsePatternProperties(
for (auto&& patternSchema : patternPropertiesElt.embeddedObject()) {
if (patternSchema.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternPropertiesKeyword
- << "' has property '"
- << patternSchema.fieldNameStringData()
- << "' which is not an object")};
+ str::stream()
+ << "$jsonSchema keyword '"
+ << JSONSchemaParser::kSchemaPatternPropertiesKeyword
+ << "' has property '" << patternSchema.fieldNameStringData()
+ << "' which is not an object")};
}
// Parse the nested schema using a placeholder as the path, since we intend on using the
@@ -840,11 +844,11 @@ StatusWith<boost::optional<long long>> parseItems(
for (auto subschema : itemsElt.embeddedObject()) {
if (subschema.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaItemsKeyword
- << "' requires that each element of the array is an "
- "object, but found a "
- << subschema.type()};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
+ << "' requires that each element of the array is an "
+ "object, but found a "
+ << subschema.type()};
}
// We want to make an ExpressionWithPlaceholder for $_internalSchemaMatchArrayIndex,
@@ -895,8 +899,7 @@ StatusWith<boost::optional<long long>> parseItems(
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
- << "' must be an array or an object, not "
- << itemsElt.type()};
+ << "' must be an array or an object, not " << itemsElt.type()};
}
return startIndexForAdditionalItems;
@@ -1267,8 +1270,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMaximumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMaximumKeyword << " is present"};
}
if (auto minimumElt = keywordMap[JSONSchemaParser::kSchemaMinimumKeyword]) {
@@ -1294,8 +1296,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMinimumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMinimumKeyword << " is present"};
}
return Status::OK();
@@ -1316,19 +1317,17 @@ Status translateEncryptionKeywords(StringMap<BSONElement>& keywordMap,
expCtx->maxFeatureCompatibilityVersion <
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42) {
return Status(ErrorCodes::QueryFeatureNotAllowed,
- str::stream() << "The featureCompatiblityVersion must be 4.2 to use "
- "encryption keywords in $jsonSchema. See "
- << feature_compatibility_version_documentation::kUpgradeLink
- << ".");
+ str::stream()
+                          << "The featureCompatibilityVersion must be 4.2 to use "
+ "encryption keywords in $jsonSchema. See "
+ << feature_compatibility_version_documentation::kUpgradeLink << ".");
}
if (encryptElt && encryptMetadataElt) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaEncryptMetadataKeyword
- << "'");
+ << JSONSchemaParser::kSchemaEncryptKeyword << "' and '"
+ << JSONSchemaParser::kSchemaEncryptMetadataKeyword << "'");
}
if (encryptMetadataElt) {
@@ -1398,9 +1397,9 @@ Status validateMetadataKeywords(StringMap<BSONElement>& keywordMap) {
if (auto titleElem = keywordMap[JSONSchemaParser::kSchemaTitleKeyword]) {
if (titleElem.type() != BSONType::String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaTitleKeyword
- << "' must be of type string");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaTitleKeyword
+ << "' must be of type string");
}
}
return Status::OK();
@@ -1455,16 +1454,16 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
<< "' is not currently supported");
} else if (!ignoreUnknownKeywords) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Unknown $jsonSchema keyword: " << elt.fieldNameStringData());
}
continue;
}
if (it->second) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Duplicate $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Duplicate $jsonSchema keyword: " << elt.fieldNameStringData());
}
keywordMap[elt.fieldNameStringData()] = elt;
@@ -1481,28 +1480,24 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
if (typeElem && bsonTypeElem) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "'");
+ << JSONSchemaParser::kSchemaTypeKeyword << "' and '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "'");
} else if (typeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
} else if (bsonTypeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
}
std::unique_ptr<InternalSchemaTypeExpression> typeExpr;
@@ -1593,25 +1588,25 @@ StatusWith<MatcherTypeSet> JSONSchemaParser::parseTypeSet(BSONElement typeElt,
for (auto&& typeArrayEntry : typeElt.embeddedObject()) {
if (typeArrayEntry.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' array elements must be strings")};
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' array elements must be strings")};
}
if (typeArrayEntry.valueStringData() == JSONSchemaParser::kSchemaTypeInteger) {
return {ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema type '"
- << JSONSchemaParser::kSchemaTypeInteger
- << "' is not currently supported."};
+ str::stream()
+ << "$jsonSchema type '" << JSONSchemaParser::kSchemaTypeInteger
+ << "' is not currently supported."};
}
auto insertionResult = aliases.insert(typeArrayEntry.valueStringData());
if (!insertionResult.second) {
- return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' has duplicate value: "
- << typeArrayEntry.valueStringData())};
+ return {
+ Status(ErrorCodes::FailedToParse,
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' has duplicate value: " << typeArrayEntry.valueStringData())};
}
}
}
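One hunk in this file is a spelling-level formatting fix to a C++17 structured binding: `const auto[it, didInsert]` becomes `const auto [it, didInsert]`. The surrounding idiom — using std::set::insert's returned bool to flag duplicate "required" property names — looks like this in isolation (a self-contained sketch, not the parser's code):

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
        std::set<std::string> properties;
        for (const char* name : {"a", "b", "a"}) {
            // insert() returns {iterator, bool}; the bool is false on a duplicate.
            const auto [it, didInsert] = properties.insert(name);
            if (!didInsert)
                std::cout << "duplicate required property: " << *it << "\n";
        }
    }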
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 756263a6ff5..11a75108e9c 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -417,8 +417,7 @@ Status storeMongodOptions(const moe::Environment& params) {
storageGlobalParams.syncdelay > StorageGlobalParams::kMaxSyncdelaySecs) {
return Status(ErrorCodes::BadValue,
str::stream() << "syncdelay out of allowed range (0-"
- << StorageGlobalParams::kMaxSyncdelaySecs
- << "s)");
+ << StorageGlobalParams::kMaxSyncdelaySecs << "s)");
}
}
@@ -457,9 +456,9 @@ Status storeMongodOptions(const moe::Environment& params) {
if (journalCommitIntervalMs < 1 ||
journalCommitIntervalMs > StorageGlobalParams::kMaxJournalCommitIntervalMs) {
return Status(ErrorCodes::BadValue,
- str::stream() << "--journalCommitInterval out of allowed range (1-"
- << StorageGlobalParams::kMaxJournalCommitIntervalMs
- << "ms)");
+ str::stream()
+ << "--journalCommitInterval out of allowed range (1-"
+ << StorageGlobalParams::kMaxJournalCommitIntervalMs << "ms)");
}
}
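Both hunks in this file reflow range-check error messages of the form "X out of allowed range (lo-hi)". The pattern, with plain std::ostringstream standing in for mongo's str::stream and an illustrative bound in place of the real kMaxSyncdelaySecs constant:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Returns an error message, or "" when the value is in range
    // (a stand-in for returning Status(ErrorCodes::BadValue, ...)).
    std::string validateSyncdelay(double syncdelay, double maxSecs) {
        if (syncdelay < 0 || syncdelay > maxSecs) {
            std::ostringstream ss;
            ss << "syncdelay out of allowed range (0-" << maxSecs << "s)";
            return ss.str();
        }
        return "";
    }

    int main() {
        std::cout << validateSyncdelay(120.0, 60.0) << "\n";
    }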
diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h
index 62f86b51611..4ed3efa0afa 100644
--- a/src/mongo/db/mongod_options.h
+++ b/src/mongo/db/mongod_options.h
@@ -84,4 +84,4 @@ Status storeMongodOptions(const moe::Environment& params);
* Help test user for storage.dbPath config option.
*/
std::string storageDBPathDescription();
-}
+} // namespace mongo
diff --git a/src/mongo/db/multi_key_path_tracker.cpp b/src/mongo/db/multi_key_path_tracker.cpp
index d1c2c1ca293..d78271932e5 100644
--- a/src/mongo/db/multi_key_path_tracker.cpp
+++ b/src/mongo/db/multi_key_path_tracker.cpp
@@ -61,8 +61,8 @@ std::string MultikeyPathTracker::dumpMultikeyPaths(const MultikeyPaths& multikey
void MultikeyPathTracker::mergeMultikeyPaths(MultikeyPaths* toMergeInto,
const MultikeyPaths& newPaths) {
invariant(toMergeInto->size() == newPaths.size(),
- str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto) << "; newPaths: "
- << dumpMultikeyPaths(newPaths));
+ str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto)
+ << "; newPaths: " << dumpMultikeyPaths(newPaths));
for (auto idx = std::size_t(0); idx < toMergeInto->size(); ++idx) {
toMergeInto->at(idx).insert(newPaths[idx].begin(), newPaths[idx].end());
}
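Only the invariant message changes above, but for context, mergeMultikeyPaths is an index-by-index set union. A standalone sketch with MultikeyPaths written out as the vector-of-sets type the loop implies (an assumed alias, matching the per-component insert calls):

    #include <cassert>
    #include <set>
    #include <vector>

    using MultikeyPaths = std::vector<std::set<std::size_t>>;

    // Unions newPaths into toMergeInto, one indexed path component at a time.
    void mergeMultikeyPaths(MultikeyPaths* toMergeInto, const MultikeyPaths& newPaths) {
        assert(toMergeInto->size() == newPaths.size());  // stands in for invariant()
        for (std::size_t idx = 0; idx < toMergeInto->size(); ++idx)
            toMergeInto->at(idx).insert(newPaths[idx].begin(), newPaths[idx].end());
    }

    int main() {
        MultikeyPaths a{{0}, {}};
        mergeMultikeyPaths(&a, {{1}, {2}});
        assert(a[0] == std::set<std::size_t>({0, 1}));
        assert(a[1] == std::set<std::size_t>({2}));
    }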
diff --git a/src/mongo/db/multi_key_path_tracker_test.cpp b/src/mongo/db/multi_key_path_tracker_test.cpp
index 580b69519f3..9203ff5ff4a 100644
--- a/src/mongo/db/multi_key_path_tracker_test.cpp
+++ b/src/mongo/db/multi_key_path_tracker_test.cpp
@@ -47,8 +47,7 @@ void assertMultikeyPathsAreEqual(const MultikeyPaths& actual, const MultikeyPath
if (!match) {
FAIL(str::stream() << "Expected: " << MultikeyPathTracker::dumpMultikeyPaths(expected)
<< ", "
- << "Actual: "
- << MultikeyPathTracker::dumpMultikeyPaths(actual));
+ << "Actual: " << MultikeyPathTracker::dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 5fbb645c09c..5e87c48e59c 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -194,8 +194,8 @@ StatusWith<repl::OpTime> NamespaceString::getDropPendingNamespaceOpTime() const
long long term;
status = mongo::parseNumberFromString(opTimeStr.substr(termSeparatorIndex + 1), &term);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Invalid term in drop-pending namespace: "
- << _ns);
+ return status.withContext(str::stream()
+ << "Invalid term in drop-pending namespace: " << _ns);
}
return repl::OpTime(Timestamp(Seconds(seconds), increment), term);
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index d55722595ac..6dd6e52f66e 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -534,11 +534,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (!collElem || args.nss.ns() == collElem.String()) {
uasserted(40654,
str::stream() << "failCollectionUpdates failpoint enabled, namespace: "
- << args.nss.ns()
- << ", update: "
- << args.updateArgs.update
- << " on document with "
- << args.updateArgs.criteria);
+ << args.nss.ns() << ", update: " << args.updateArgs.update
+ << " on document with " << args.updateArgs.criteria);
}
}
@@ -1228,7 +1225,6 @@ void logCommitOrAbortForPreparedTransaction(OperationContext* opCtx,
writeConflictRetry(
opCtx, "onPreparedTransactionCommitOrAbort", NamespaceString::kRsOplogNamespace.ns(), [&] {
-
// Writes to the oplog only require a Global intent lock. Guaranteed by
// OplogSlotReserver.
invariant(opCtx->lockState()->isWriteLocked());
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index 147c1960e50..92f55a4ba40 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -120,12 +120,10 @@ TEST_F(OpObserverTest, StartIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -162,12 +160,10 @@ TEST_F(OpObserverTest, CommitIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -204,12 +200,10 @@ TEST_F(OpObserverTest, AbortIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -289,8 +283,7 @@ TEST_F(OpObserverTest, CollModWithCollectionOptionsAndTTLInfo) {
BSON("collectionOptions_old"
<< BSON("validationLevel" << oldCollOpts.validationLevel << "validationAction"
<< oldCollOpts.validationAction)
- << "expireAfterSeconds_old"
- << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
+ << "expireAfterSeconds_old" << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
ASSERT_BSONOBJ_EQ(o2Expected, o2);
}
@@ -392,10 +385,9 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) {
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp
- << "dropTarget"
- << dropTargetUuid);
+ auto oExpected =
+ BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp"
+ << stayTemp << "dropTarget" << dropTargetUuid);
ASSERT_BSONOBJ_EQ(oExpected, o);
// Ensure that the rename optime returned is the same as the last optime in the ReplClientInfo.
@@ -424,8 +416,8 @@ TEST_F(OpObserverTest, OnRenameCollectionOmitsDropTargetFieldIfDropTargetUuidIsN
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp);
+ auto oExpected = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "stayTemp" << stayTemp);
ASSERT_BSONOBJ_EQ(oExpected, o);
}
@@ -734,45 +726,28 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "prepare"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "d"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
ASSERT_EQ(oplogEntry.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -837,16 +812,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedCommitTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -905,16 +875,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedAbortTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -1159,42 +1124,27 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2 << "data"
- << "z"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3 << "data"
- << "w"))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 2 << "data"
+ << "z"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 3 << "data"
+ << "w"))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(!oplogEntry.shouldPrepare());
ASSERT_FALSE(oplogEntryObj.hasField("prepare"));
@@ -1236,28 +1186,19 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) {
auto oplogEntry = getSingleOplogEntry(opCtx());
checkCommonFields(oplogEntry);
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1292,20 +1233,12 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTest) {
auto o = oplogEntry.getObjectField("o");
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1350,12 +1283,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionSingleStatementTest) {
// The implicit commit oplog entry.
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << BSON("_id" << 0))));
+ << "ns" << nss.toString() << "ui" << uuid
+ << "o" << BSON("_id" << 0))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject());
}
@@ -1394,52 +1323,32 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
}
@@ -1490,36 +1399,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1563,28 +1462,18 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
    oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1634,52 +1523,30 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertPrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "prepare" << true << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1742,36 +1609,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdatePrepareTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1831,28 +1686,16 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeletePrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -2060,36 +1903,20 @@ TEST_F(OpObserverMultiEntryTransactionTest, UnpreparedTransactionPackingTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
@@ -2133,38 +1960,21 @@ TEST_F(OpObserverMultiEntryTransactionTest, PreparedTransactionPackingTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
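
The expectations in these tests are composed with the BSON stream macros. As a minimal standalone sketch (not part of the patch; makeExpectedApplyOps, ns, and id are placeholder names), this is roughly how BSON and BSON_ARRAY build one of the asserted applyOps documents in the packed layout the patch adopts:

    #include <string>

    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobjbuilder.h"

    mongo::BSONObj makeExpectedApplyOps(const std::string& ns, int id) {
        using namespace mongo;
        // One insert op inside applyOps, plus the 'partialTxn' marker that the
        // tests assert on for non-terminal entries of a multi-entry transaction.
        return BSON("applyOps" << BSON_ARRAY(BSON("op"
                                                  << "i"
                                                  << "ns" << ns << "o" << BSON("_id" << id)))
                               << "partialTxn" << true);
    }
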
diff --git a/src/mongo/db/op_observer_util.h b/src/mongo/db/op_observer_util.h
index e3a7d195e7a..7e60c66cca8 100644
--- a/src/mongo/db/op_observer_util.h
+++ b/src/mongo/db/op_observer_util.h
@@ -42,4 +42,4 @@ BSONObj makeCreateCollCmdObj(const NamespaceString& collectionName,
BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
const CollectionOptions& oldCollOptions,
boost::optional<TTLCollModInfo> ttlInfo);
-}
+} // namespace mongo
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 9c2b6d74774..27832209b69 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -42,7 +42,7 @@ struct OperationTimeTrackerHolder {
const OperationContext::Decoration<OperationTimeTrackerHolder> OperationTimeTrackerHolder::get =
OperationContext::declareDecoration<OperationTimeTrackerHolder>();
-}
+} // namespace
std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext* opCtx) {
auto timeTrackerHolder = OperationTimeTrackerHolder::get(opCtx);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 99ebccf0378..b26f583d460 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -50,4 +50,4 @@ long long deleteObjects(OperationContext* opCtx,
bool justOne,
bool god = false,
bool fromMigrate = false);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index d891c998a7c..dfc841588dc 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -58,9 +58,9 @@ Status validateDepth(const BSONObj& obj) {
// We're exactly at the limit, so descending to the next level would exceed
// the maximum depth.
return {ErrorCodes::Overflow,
- str::stream() << "cannot insert document because it exceeds "
- << BSONDepth::getMaxDepthForUserStorage()
- << " levels of nesting"};
+ str::stream()
+ << "cannot insert document because it exceeds "
+ << BSONDepth::getMaxDepthForUserStorage() << " levels of nesting"};
}
frames.emplace_back(elem.embeddedObject());
}
@@ -78,10 +78,8 @@ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj&
if (doc.objsize() > BSONObjMaxUserSize)
return StatusWith<BSONObj>(ErrorCodes::BadValue,
str::stream() << "object to insert too large"
- << ". size in bytes: "
- << doc.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << doc.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
auto depthStatus = validateDepth(doc);
if (!depthStatus.isOK()) {
@@ -206,11 +204,9 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
if (db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen)
return Status(ErrorCodes::InvalidNamespace,
- str::stream() << "fully qualified namespace " << db << '.' << coll
- << " is too long "
- << "(max is "
- << NamespaceString::MaxNsCollectionLen
- << " bytes)");
+ str::stream()
+ << "fully qualified namespace " << db << '.' << coll << " is too long "
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)");
    // check special areas
@@ -274,4 +270,4 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
return Status::OK();
}
-}
+} // namespace mongo
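
The hunks above only repack the << chains; the str::stream() diagnostic pattern itself is unchanged. A hedged standalone sketch of that pattern (checkInsertSize and maxSize are illustrative stand-ins, not the real BSONObjMaxUserSize constant; the str.h header location varies by tree era):

    #include "mongo/base/status.h"
    #include "mongo/util/str.h"  // str::stream

    mongo::Status checkInsertSize(int objSize, int maxSize) {
        if (objSize > maxSize)
            // Brace-initializing a Status from an error code and a streamed
            // message, the same shape used in validateDepth above.
            return {mongo::ErrorCodes::BadValue,
                    mongo::str::stream() << "object to insert too large. size in bytes: "
                                         << objSize << ", max size: " << maxSize};
        return mongo::Status::OK();
    }
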
diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h
index ebbf9738460..8bdcbadc281 100644
--- a/src/mongo/db/ops/insert.h
+++ b/src/mongo/db/ops/insert.h
@@ -58,4 +58,4 @@ Status userAllowedWriteNS(const NamespaceString& ns);
* operations. If not, returns an error Status.
*/
Status userAllowedCreateNS(StringData db, StringData coll);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index a506bb88c0c..a600f37a543 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -82,8 +82,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while creating collection "
- << nsString
- << " during upsert"));
+ << nsString << " during upsert"));
}
WriteUnitOfWork wuow(opCtx);
collection = db->createCollection(opCtx, nsString, CollectionOptions());
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index fc39b35d0c9..685672de251 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -365,8 +365,9 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
"hangDuringBatchInsert",
[&wholeOp]() {
log() << "batch insert - hangDuringBatchInsert fail point enabled for namespace "
- << wholeOp.getNamespace() << ". Blocking "
- "until fail point is disabled.";
+ << wholeOp.getNamespace()
+ << ". Blocking "
+ "until fail point is disabled.";
},
true, // Check for interrupt periodically.
wholeOp.getNamespace());
@@ -504,7 +505,6 @@ WriteResult performInserts(OperationContext* opCtx,
durationCount<Microseconds>(curOp.elapsedTimeExcludingPauses()),
curOp.isCommand(),
curOp.getReadWriteType());
-
});
{
@@ -861,7 +861,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
"until fail point is disabled.";
},
true // Check for interrupt periodically.
- );
+ );
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index e63dbb500d9..935139adfda 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -39,11 +39,11 @@
namespace mongo {
+using write_ops::Delete;
+using write_ops::DeleteOpEntry;
using write_ops::Insert;
using write_ops::Update;
-using write_ops::Delete;
using write_ops::UpdateOpEntry;
-using write_ops::DeleteOpEntry;
namespace {
@@ -51,10 +51,7 @@ template <class T>
void checkOpCountForCommand(const T& op, size_t numOps) {
uassert(ErrorCodes::InvalidLength,
str::stream() << "Write batch sizes must be between 1 and "
- << write_ops::kMaxWriteBatchSize
- << ". Got "
- << numOps
- << " operations.",
+ << write_ops::kMaxWriteBatchSize << ". Got " << numOps << " operations.",
numOps != 0 && numOps <= write_ops::kMaxWriteBatchSize);
const auto& stmtIds = op.getWriteCommandBase().getStmtIds();
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index e9499ecde08..b5074350ef4 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -44,9 +44,7 @@ TEST(CommandWriteOpsParsers, CommonFields_BypassDocumentValidation) {
for (BSONElement bypassDocumentValidation : BSON_ARRAY(true << false << 1 << 0 << 1.0 << 0.0)) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "bypassDocumentValidation"
+ << "documents" << BSON_ARRAY(BSONObj()) << "bypassDocumentValidation"
<< bypassDocumentValidation);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
@@ -61,10 +59,7 @@ TEST(CommandWriteOpsParsers, CommonFields_Ordered) {
for (bool ordered : {true, false}) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "ordered"
- << ordered);
+ << "documents" << BSON_ARRAY(BSONObj()) << "ordered" << ordered);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
auto op = InsertOp::parse(request);
@@ -77,14 +72,8 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
// These flags are ignored, so there is nothing to check other than that this doesn't throw.
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "maxTimeMS"
- << 1000
- << "shardVersion"
- << BSONObj()
- << "writeConcern"
- << BSONObj());
+ << "documents" << BSON_ARRAY(BSONObj()) << "maxTimeMS" << 1000 << "shardVersion"
+ << BSONObj() << "writeConcern" << BSONObj());
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
InsertOp::parse(request);
@@ -94,10 +83,7 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "GARBAGE"
- << BSON_ARRAY(BSONObj()));
+ << "documents" << BSON_ARRAY(BSONObj()) << "GARBAGE" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -105,12 +91,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
}
TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonField) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "documents"
- << BSON_ARRAY(BSONObj()));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj()) << "documents" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -121,9 +105,7 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
OpMsgRequest request;
request.body = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "$db"
+ << "documents" << BSON_ARRAY(BSONObj()) << "$db"
<< "foo");
request.sequences = {{"documents",
{
@@ -134,12 +116,10 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
}
TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj() << BSONObj()) << "stmtIds" << BSON_ARRAY(12));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(InsertOp::parse(request), AssertionException, ErrorCodes::InvalidLength);
@@ -149,12 +129,8 @@ TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
TEST(CommandWriteOpsParsers, ErrorOnStmtIdSpecifiedTwoWays) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12)
- << "stmtId"
- << 13);
+ << "documents" << BSON_ARRAY(BSONObj()) << "stmtIds" << BSON_ARRAY(12)
+ << "stmtId" << 13);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
@@ -174,10 +150,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsInUpdateDoc) {
}
TEST(CommandWriteOpsParsers, GarbageFieldsInDeleteDoc) {
- auto cmd = BSON("delete"
- << "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
+ auto cmd =
+ BSON("delete"
+ << "bar"
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(DeleteOp::parse(request), AssertionException);
@@ -324,12 +300,7 @@ TEST(CommandWriteOpsParsers, Update) {
for (bool multi : {false, true}) {
auto rawUpdate =
BSON("q" << query << "u" << update << "arrayFilters" << BSON_ARRAY(arrayFilter)
- << "multi"
- << multi
- << "upsert"
- << upsert
- << "collation"
- << collation);
+ << "multi" << multi << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -365,10 +336,8 @@ TEST(CommandWriteOpsParsers, UpdateWithPipeline) {
<< "en_US");
for (bool upsert : {false, true}) {
for (bool multi : {false, true}) {
- auto rawUpdate = BSON(
- "q" << query["q"] << "u" << update["u"] << "multi" << multi << "upsert" << upsert
- << "collation"
- << collation);
+ auto rawUpdate = BSON("q" << query["q"] << "u" << update["u"] << "multi" << multi
+ << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -423,8 +392,7 @@ TEST(CommandWriteOpsParsers, RemoveErrorsWithBadLimit) {
for (BSONElement limit : BSON_ARRAY(-1 << 2 << 0.5)) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index 32a160d433c..866385c73fe 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -56,11 +56,8 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40606,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isRemove());
uassert(40607,
str::stream() << "No pre-image available for findAndModify retry request:"
@@ -71,22 +68,16 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40608,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isUpsert());
} else {
uassert(
40609,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
opType == repl::OpTypeEnum::kUpdate);
if (request.shouldReturnNew()) {
@@ -94,18 +85,14 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document after update returned, but only before "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPostImageOpTime());
} else {
uassert(40612,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document before update returned, but only after "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPreImageOpTime());
}
}
@@ -129,8 +116,7 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
uassert(40613,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toString()
- << " cannot be found",
+ << opTime.toString() << " cannot be found",
!oplogDoc.isEmpty());
auto oplogEntry = uassertStatusOK(repl::OplogEntry::parse(oplogDoc));
@@ -172,8 +158,7 @@ repl::OplogEntry getInnerNestedOplogEntry(const repl::OplogEntry& entry) {
uassert(40635,
str::stream() << "expected nested oplog entry with ts: "
<< entry.getTimestamp().toString()
- << " to have o2 field: "
- << redact(entry.toBSON()),
+ << " to have o2 field: " << redact(entry.toBSON()),
entry.getObject2());
return uassertStatusOK(repl::OplogEntry::parse(*entry.getObject2()));
}
@@ -200,10 +185,8 @@ SingleWriteResult parseOplogEntryForUpdate(const repl::OplogEntry& entry) {
str::stream() << "update retry request is not compatible with previous write in "
"the transaction of type: "
<< OpType_serializer(entry.getOpType())
- << ", oplogTs: "
- << entry.getTimestamp().toString()
- << ", oplog: "
- << redact(entry.toBSON()));
+ << ", oplogTs: " << entry.getTimestamp().toString()
+ << ", oplog: " << redact(entry.toBSON()));
}
return res;
diff --git a/src/mongo/db/ops/write_ops_retryability_test.cpp b/src/mongo/db/ops/write_ops_retryability_test.cpp
index 05c4828dae1..550744fa95c 100644
--- a/src/mongo/db/ops/write_ops_retryability_test.cpp
+++ b/src/mongo/db/ops/write_ops_retryability_test.cpp
@@ -78,15 +78,12 @@ repl::OplogEntry makeOplogEntry(repl::OpTime opTime,
}
TEST_F(WriteOpsRetryability, ParseOplogEntryForUpdate) {
- const auto entry =
- assertGet(repl::OplogEntry::parse(BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
- << "u"
- << "ns"
- << "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5)
- << "o2"
- << BSON("_id" << 1))));
+ const auto entry = assertGet(repl::OplogEntry::parse(
+ BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
+ << "u"
+ << "ns"
+ << "a.b"
+ << "o" << BSON("_id" << 1 << "x" << 5) << "o2" << BSON("_id" << 1))));
auto res = parseOplogEntryForUpdate(entry);
@@ -120,8 +117,7 @@ TEST_F(WriteOpsRetryability, ParseOplogEntryForUpsert) {
<< "i"
<< "ns"
<< "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5))));
+ << "o" << BSON("_id" << 1 << "x" << 5))));
auto res = parseOplogEntryForUpdate(entry);
@@ -187,8 +183,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
@@ -197,8 +192,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
<< "value"
<< BSON("_id"
<< "ID value"
- << "x"
- << 1)),
+ << "x" << 1)),
result);
}
@@ -212,15 +206,13 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnOld) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted"
<< "ID value")
- << "value"
- << BSONNULL),
+ << "value" << BSONNULL),
result);
}
@@ -242,8 +234,7 @@ TEST_F(FindAndModifyRetryability, NestedUpsert) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted" << 1)
- << "value"
- << BSON("_id" << 1)),
+ << "value" << BSON("_id" << 1)),
result);
}
@@ -353,8 +344,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -386,8 +376,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -413,8 +402,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
@@ -446,8 +434,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
index 0f55d053fb3..1e4dbc1c303 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
@@ -108,15 +108,15 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
- TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver([anchor = _anchor](
- const Argument& secs) {
- try {
- anchor->setPeriod(getPeriod(secs));
- } catch (const DBException& ex) {
- log() << "Failed to update period of thread which aborts expired transactions "
- << ex.toStatus();
- }
- });
+ TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver(
+ [anchor = _anchor](const Argument& secs) {
+ try {
+ anchor->setPeriod(getPeriod(secs));
+ } catch (const DBException& ex) {
+ log() << "Failed to update period of thread which aborts expired transactions "
+ << ex.toStatus();
+ }
+ });
}
} // namespace mongo
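
Both periodic-runner files reflow the same observer registration, where a lambda capturing the job anchor re-applies a changed server parameter to the running job. A generic, self-contained sketch of that shape (Job, Observable, and the period value are all illustrative, not the mongo API):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Job {
        int periodSecs = 0;
        void setPeriod(int secs) { periodSecs = secs; }
    };

    struct Observable {
        std::vector<std::function<void(int)>> observers;
        void addObserver(std::function<void(int)> f) { observers.push_back(std::move(f)); }
        void notify(int v) { for (auto& f : observers) f(v); }
    };

    int main() {
        auto anchor = std::make_shared<Job>();
        Observable transactionLifetimeLimitSeconds;
        // Capture the anchor by value so the job handle outlives this scope,
        // as the [anchor = _anchor] capture does in the code above.
        transactionLifetimeLimitSeconds.addObserver(
            [anchor](int secs) { anchor->setPeriod(secs); });
        transactionLifetimeLimitSeconds.notify(60);
        std::cout << anchor->periodSecs << "\n";  // prints 60
    }
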
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
index a550f9a3624..252277130e0 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
@@ -92,16 +92,16 @@ void PeriodicThreadToDecreaseSnapshotHistoryIfNotNeeded::_init(ServiceContext* s
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
- SnapshotWindowParams::observeCheckCachePressurePeriodSeconds.addObserver([anchor = _anchor](
- const auto& secs) {
- try {
- anchor->setPeriod(Seconds(secs));
- } catch (const DBException& ex) {
- log() << "Failed to update the period of the thread which decreases data history "
- "target window size if there have been no new SnapshotTooOld errors."
- << ex.toStatus();
- }
- });
+ SnapshotWindowParams::observeCheckCachePressurePeriodSeconds.addObserver(
+ [anchor = _anchor](const auto& secs) {
+ try {
+ anchor->setPeriod(Seconds(secs));
+ } catch (const DBException& ex) {
+ log() << "Failed to update the period of the thread which decreases data history "
+ "target window size if there have been no new SnapshotTooOld errors."
+ << ex.toStatus();
+ }
+ });
}
} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index 801c356020f..80693dbf739 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -351,4 +351,4 @@ public:
private:
MutableDocument _output;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index 38946678389..43550e9e361 100644
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -134,4 +134,4 @@ void AccumulatorAvg::reset() {
_decimalTotal = {};
_count = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index 3e452f1d0e6..6fcc334af83 100644
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -74,4 +74,4 @@ intrusive_ptr<Accumulator> AccumulatorFirst::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorFirst(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 3c667d16d53..4774abca5e9 100644
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -68,4 +68,4 @@ intrusive_ptr<Accumulator> AccumulatorLast::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorLast(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_merge_objects.cpp b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
index 4f8ef357f35..8878ff97676 100644
--- a/src/mongo/db/pipeline/accumulator_merge_objects.cpp
+++ b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
@@ -71,8 +71,7 @@ void AccumulatorMergeObjects::processInternal(const Value& input, bool merging)
uassert(40400,
str::stream() << "$mergeObjects requires object inputs, but input " << input.toString()
- << " is of type "
- << typeName(input.getType()),
+ << " is of type " << typeName(input.getType()),
(input.getType() == BSONType::Object));
FieldIterator iter = input.getDocument().fieldIterator();
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index d81403eac85..496d9d94220 100644
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -89,4 +89,4 @@ intrusive_ptr<Accumulator> AccumulatorMax::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorMax(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index becb6828635..5c1f640cef8 100644
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -86,4 +86,4 @@ intrusive_ptr<Accumulator> AccumulatorPush::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorPush(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_std_dev.cpp b/src/mongo/db/pipeline/accumulator_std_dev.cpp
index a10da2a41c0..a2bce628539 100644
--- a/src/mongo/db/pipeline/accumulator_std_dev.cpp
+++ b/src/mongo/db/pipeline/accumulator_std_dev.cpp
@@ -118,4 +118,4 @@ void AccumulatorStdDev::reset() {
_mean = 0;
_m2 = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 6cf7b38c573..5d550eb7c8b 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -131,8 +131,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << repl::ReadConcernArgs::kReadConcernFieldName
- << " must be an object, not a "
- << typeName(elem.type())};
+ << " must be an object, not a " << typeName(elem.type())};
}
request.setReadConcern(elem.embeddedObject().getOwned());
} else if (kHintName == fieldName) {
@@ -214,8 +213,8 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
} else if (WriteConcernOptions::kWriteConcernField == fieldName) {
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be an object, not a "
- << typeName(elem.type())};
+ str::stream()
+ << fieldName << " must be an object, not a " << typeName(elem.type())};
}
WriteConcernOptions writeConcern;
@@ -250,23 +249,20 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (!hasCursorElem && !hasExplainElem) {
return {ErrorCodes::FailedToParse,
str::stream()
- << "The '"
- << kCursorName
+ << "The '" << kCursorName
<< "' option is required, except for aggregate with the explain argument"};
}
if (request.getExplain() && cmdObj[WriteConcernOptions::kWriteConcernField]) {
return {ErrorCodes::FailedToParse,
str::stream() << "Aggregation explain does not support the'"
- << WriteConcernOptions::kWriteConcernField
- << "' option"};
+ << WriteConcernOptions::kWriteConcernField << "' option"};
}
if (hasNeedsMergeElem && !hasFromMongosElem) {
return {ErrorCodes::FailedToParse,
str::stream() << "Cannot specify '" << kNeedsMergeName << "' without '"
- << kFromMongosName
- << "'"};
+ << kFromMongosName << "'"};
}
return request;
diff --git a/src/mongo/db/pipeline/dependencies.cpp b/src/mongo/db/pipeline/dependencies.cpp
index 6bfdc19bdce..1586a68f96b 100644
--- a/src/mongo/db/pipeline/dependencies.cpp
+++ b/src/mongo/db/pipeline/dependencies.cpp
@@ -282,4 +282,4 @@ Document documentHelper(const BSONObj& bson, const Document& neededFields, int n
Document ParsedDeps::extractFields(const BSONObj& input) const {
return documentHelper(input, _fields, _nFields);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies.h b/src/mongo/db/pipeline/dependencies.h
index b7e31a6237b..3487584a4a0 100644
--- a/src/mongo/db/pipeline/dependencies.h
+++ b/src/mongo/db/pipeline/dependencies.h
@@ -205,4 +205,4 @@ private:
Document _fields;
int _nFields; // Cache the number of top-level fields needed.
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies_test.cpp b/src/mongo/db/pipeline/dependencies_test.cpp
index 2fdf25c799b..6d2741a78e4 100644
--- a/src/mongo/db/pipeline/dependencies_test.cpp
+++ b/src/mongo/db/pipeline/dependencies_test.cpp
@@ -147,8 +147,7 @@ TEST(DependenciesToProjectionTest, ShouldAttemptToExcludeOtherFieldsIfOnlyTextSc
deps.setNeedsMetadata(DepsTracker::MetadataType::TEXT_SCORE, true);
ASSERT_BSONOBJ_EQ(deps.toProjection(),
BSON(Document::metaFieldTextScore << metaTextScore << "_id" << 0
- << "$noFieldsNeeded"
- << 1));
+ << "$noFieldsNeeded" << 1));
}
TEST(DependenciesToProjectionTest,
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index 1b8d8ecb5cb..17f3110ba9d 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -288,8 +288,7 @@ BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& d
void Document::toBson(BSONObjBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
@@ -587,4 +586,4 @@ Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeS
return doc.freeze();
}
-}
+} // namespace mongo
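
Document::toBson above guards recursive serialization with a depth ceiling. A self-contained sketch of the same guard (the limit of 200 is illustrative; the real bound comes from BSONDepth::getMaxAllowableDepth()):

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    struct Node {
        std::vector<Node> children;
    };

    // Traversal that fails once nesting exceeds the ceiling, mirroring the
    // uassert at the top of Document::toBson.
    void traverse(const Node& n, std::size_t level = 0, std::size_t maxDepth = 200) {
        if (level > maxDepth)
            throw std::overflow_error("document exceeds the nesting-depth limit");
        for (const auto& child : n.children)
            traverse(child, level + 1, maxDepth);
    }
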
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index f465ecce0e2..0ad560e888e 100644
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -770,4 +770,4 @@ inline MutableValue MutableValue::getField(Position pos) {
inline MutableValue MutableValue::getField(StringData key) {
return MutableDocument(*this).getField(key);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 1d2b1b58951..c922219d000 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -481,4 +481,4 @@ private:
// Defined in document.cpp
static const DocumentStorage kEmptyDoc;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_add_fields.cpp b/src/mongo/db/pipeline/document_source_add_fields.cpp
index 319ef9776c6..2c05a15766a 100644
--- a/src/mongo/db/pipeline/document_source_add_fields.cpp
+++ b/src/mongo/db/pipeline/document_source_add_fields.cpp
@@ -74,4 +74,4 @@ intrusive_ptr<DocumentSource> DocumentSourceAddFields::createFromBson(
return DocumentSourceAddFields::create(elem.Obj(), expCtx, specifiedName);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_bucket.cpp b/src/mongo/db/pipeline/document_source_bucket.cpp
index e7efd9b202e..3245d21b742 100644
--- a/src/mongo/db/pipeline/document_source_bucket.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket.cpp
@@ -37,8 +37,8 @@
namespace mongo {
using boost::intrusive_ptr;
-using std::vector;
using std::list;
+using std::vector;
REGISTER_MULTI_STAGE_ALIAS(bucket,
LiteParsedDocumentSourceDefault::parse,
@@ -58,8 +58,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
uassert(40201,
str::stream() << "Argument to $bucket stage must be an object, but found type: "
- << typeName(elem.type())
- << ".",
+ << typeName(elem.type()) << ".",
elem.type() == BSONType::Object);
const BSONObj bucketObj = elem.embeddedObject();
@@ -86,15 +85,13 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40202,
str::stream() << "The $bucket 'groupBy' field must be defined as a $-prefixed "
"path or an expression, but found: "
- << groupByField.toString(false, false)
- << ".",
+ << groupByField.toString(false, false) << ".",
groupByIsExpressionInObject || groupByIsPrefixedPath);
} else if ("boundaries" == argName) {
uassert(
40200,
str::stream() << "The $bucket 'boundaries' field must be an array, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Array);
for (auto&& boundaryElem : argument.embeddedObject()) {
@@ -102,8 +99,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40191,
str::stream() << "The $bucket 'boundaries' field must be an array of "
"constant values, but found value: "
- << boundaryElem.toString(false, false)
- << ".",
+ << boundaryElem.toString(false, false) << ".",
exprConst);
boundaryValues.push_back(exprConst->getValue());
}
@@ -111,8 +107,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40192,
str::stream()
<< "The $bucket 'boundaries' field must have at least 2 values, but found "
- << boundaryValues.size()
- << " value(s).",
+ << boundaryValues.size() << " value(s).",
boundaryValues.size() >= 2);
// Make sure that the boundaries are unique, sorted in ascending order, and have the
@@ -126,22 +121,14 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40193,
str::stream() << "All values in the the 'boundaries' option to $bucket "
"must have the same type. Found conflicting types "
- << typeName(lower.getType())
- << " and "
- << typeName(upper.getType())
- << ".",
+ << typeName(lower.getType()) << " and "
+ << typeName(upper.getType()) << ".",
lowerCanonicalType == upperCanonicalType);
uassert(40194,
str::stream()
<< "The 'boundaries' option to $bucket must be sorted, but elements "
- << i - 1
- << " and "
- << i
- << " are not in ascending order ("
- << lower.toString()
- << " is not less than "
- << upper.toString()
- << ").",
+ << i - 1 << " and " << i << " are not in ascending order ("
+ << lower.toString() << " is not less than " << upper.toString() << ").",
pExpCtx->getValueComparator().evaluate(lower < upper));
}
} else if ("default" == argName) {
@@ -151,8 +138,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40195,
str::stream()
<< "The $bucket 'default' field must be a constant expression, but found: "
- << argument.toString(false, false)
- << ".",
+ << argument.toString(false, false) << ".",
exprConst);
defaultValue = exprConst->getValue();
@@ -162,8 +148,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(
40196,
str::stream() << "The $bucket 'output' field must be an object, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Object);
for (auto&& outputElem : argument.embeddedObject()) {
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
index da86580ef02..ffed55cd488 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
@@ -51,10 +51,10 @@
namespace mongo {
namespace {
+using boost::intrusive_ptr;
using std::deque;
-using std::vector;
using std::string;
-using boost::intrusive_ptr;
+using std::vector;
class BucketAutoTests : public AggregationContextFixture {
public:
diff --git a/src/mongo/db/pipeline/document_source_change_stream.cpp b/src/mongo/db/pipeline/document_source_change_stream.cpp
index 5e5861f2971..9050b9990dd 100644
--- a/src/mongo/db/pipeline/document_source_change_stream.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream.cpp
@@ -147,9 +147,7 @@ void DocumentSourceChangeStream::checkValueType(const Value v,
BSONType expectedType) {
uassert(40532,
str::stream() << "Entry field \"" << filedName << "\" should be "
- << typeName(expectedType)
- << ", found: "
- << typeName(v.getType()),
+ << typeName(expectedType) << ", found: " << typeName(v.getType()),
(v.getType() == expectedType));
}
@@ -402,11 +400,12 @@ list<intrusive_ptr<DocumentSource>> buildPipeline(const intrusive_ptr<Expression
// There might not be a starting point if we're on mongos, otherwise we should either have a
// 'resumeAfter' starting point, or should start from the latest majority committed operation.
auto replCoord = repl::ReplicationCoordinator::get(expCtx->opCtx);
- uassert(40573,
- "The $changeStream stage is only supported on replica sets",
- expCtx->inMongos || (replCoord &&
- replCoord->getReplicationMode() ==
- repl::ReplicationCoordinator::Mode::modeReplSet));
+ uassert(
+ 40573,
+ "The $changeStream stage is only supported on replica sets",
+ expCtx->inMongos ||
+ (replCoord &&
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet));
if (!startFrom && !expCtx->inMongos) {
startFrom = replCoord->getMyLastAppliedOpTime().getTimestamp();
}
@@ -464,8 +463,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceChangeStream::createFromBson(
str::stream() << "unrecognized value for the 'fullDocument' option to the "
"$changeStream stage. Expected \"default\" or "
"\"updateLookup\", got \""
- << fullDocOption
- << "\"",
+ << fullDocOption << "\"",
fullDocOption == "updateLookup"_sd || fullDocOption == "default"_sd);
const bool shouldLookupPostImage = (fullDocOption == "updateLookup"_sd);
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index 32af6d5ef2e..501045ceadf 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -62,8 +62,8 @@ namespace mongo {
namespace {
using boost::intrusive_ptr;
-using repl::OpTypeEnum;
using repl::OplogEntry;
+using repl::OpTypeEnum;
using std::list;
using std::string;
using std::vector;
@@ -423,8 +423,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter
BSON(DSChangeStream::kStageName
<< BSON("resumeAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -467,8 +466,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO
BSON(DSChangeStream::kStageName
<< BSON("startAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -629,7 +627,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -655,7 +654,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFieldsLegacyNoId) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"x", 1}, {"y", 1}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -679,7 +679,8 @@ TEST_F(ChangeStreamStageTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
@@ -1374,7 +1375,8 @@ TEST_F(ChangeStreamStageTest, ClusterTimeMatchesOplogEntry) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -1659,9 +1661,9 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
ResumeTokenData::FromInvalidate::kFromInvalidate);
ASSERT_THROWS_CODE(DSChangeStream::createFromBson(
- BSON(DSChangeStream::kStageName << BSON(
- "resumeAfter" << resumeTokenInvalidate << "startAtOperationTime"
- << kDefaultTs))
+ BSON(DSChangeStream::kStageName
+ << BSON("resumeAfter" << resumeTokenInvalidate
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -1877,7 +1879,8 @@ TEST_F(ChangeStreamStageDBTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index df6063dad52..9afc1c730e3 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -62,28 +62,23 @@ intrusive_ptr<DocumentSource> DocumentSourceCollStats::createFromBson(
if ("latencyStats" == fieldName) {
uassert(40167,
str::stream() << "latencyStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
if (!elem["histograms"].eoo()) {
uassert(40305,
str::stream() << "histograms option to latencyStats must be bool, got "
- << elem
- << "of type "
- << typeName(elem.type()),
+ << elem << "of type " << typeName(elem.type()),
elem["histograms"].isBoolean());
}
} else if ("storageStats" == fieldName) {
uassert(40279,
str::stream() << "storageStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else if ("count" == fieldName) {
uassert(40480,
str::stream() << "count argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else {
uasserted(40168, str::stream() << "unrecognized option to $collStats: " << fieldName);
@@ -144,8 +139,8 @@ DocumentSource::GetNextResult DocumentSourceCollStats::getNext() {
pExpCtx->opCtx, pExpCtx->ns, &builder);
if (!status.isOK()) {
uasserted(40481,
- str::stream() << "Unable to retrieve count in $collStats stage: "
- << status.reason());
+ str::stream()
+ << "Unable to retrieve count in $collStats stage: " << status.reason());
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.cpp b/src/mongo/db/pipeline/document_source_current_op.cpp
index 0010bd25ce3..cbdf5eae988 100644
--- a/src/mongo/db/pipeline/document_source_current_op.cpp
+++ b/src/mongo/db/pipeline/document_source_current_op.cpp
@@ -153,9 +153,7 @@ DocumentSource::GetNextResult DocumentSourceCurrentOp::getNext() {
if (fieldName == kOpIdFieldName) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "expected numeric opid for $currentOp response from '"
- << _shardName
- << "' but got: "
- << typeName(elt.type()),
+ << _shardName << "' but got: " << typeName(elt.type()),
elt.isNumber());
std::string shardOpID = (str::stream() << _shardName << ":" << elt.numberInt());
@@ -247,8 +245,8 @@ intrusive_ptr<DocumentSource> DocumentSourceCurrentOp::createFromBson(
(elem.boolean() ? CursorMode::kIncludeCursors : CursorMode::kExcludeCursors);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "Unrecognized option '" << fieldName
- << "' in $currentOp stage.");
+ str::stream()
+ << "Unrecognized option '" << fieldName << "' in $currentOp stage.");
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.h b/src/mongo/db/pipeline/document_source_current_op.h
index 44055dcb5ad..0aa281f56c6 100644
--- a/src/mongo/db/pipeline/document_source_current_op.h
+++ b/src/mongo/db/pipeline/document_source_current_op.h
@@ -82,8 +82,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 1d971d188a8..491fa942be3 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -335,4 +335,4 @@ intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
new DocumentSourceCursor(collection, std::move(exec), pExpCtx, trackOplogTimestamp));
return source;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index f37bbe62cdb..93cb6771f35 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -124,9 +124,7 @@ Exchange::Exchange(ExchangeSpec spec, std::unique_ptr<Pipeline, PipelineDeleter>
uassert(50951,
str::stream() << "Specified exchange buffer size (" << _maxBufferSize
- << ") exceeds the maximum allowable amount ("
- << kMaxBufferSize
- << ").",
+ << ") exceeds the maximum allowable amount (" << kMaxBufferSize << ").",
_maxBufferSize <= kMaxBufferSize);
for (int idx = 0; idx < _spec.getConsumers(); ++idx) {
@@ -205,8 +203,7 @@ std::vector<size_t> Exchange::extractConsumerIds(
uassert(50950,
str::stream() << "Specified number of exchange consumers (" << nConsumers
- << ") exceeds the maximum allowable amount ("
- << kMaxNumberConsumers
+ << ") exceeds the maximum allowable amount (" << kMaxNumberConsumers
<< ").",
nConsumers <= kMaxNumberConsumers);
@@ -411,8 +408,9 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
if (elem.type() == BSONType::String && elem.str() == "hashed") {
- kb << "" << BSONElementHasher::hash64(BSON("" << value).firstElement(),
- BSONElementHasher::DEFAULT_HASH_SEED);
+ kb << ""
+ << BSONElementHasher::hash64(BSON("" << value).firstElement(),
+ BSONElementHasher::DEFAULT_HASH_SEED);
} else {
kb << "" << value;
}
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index cd66171a246..ef4f626e7b6 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -556,7 +556,6 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
ThreadInfo* threadInfo = &threads[id];
auto handle = _executor->scheduleWork(
[threadInfo, &processedDocs](const executor::TaskExecutor::CallbackArgs& cb) {
-
DocumentSourceExchange* exchange = threadInfo->documentSourceExchange.get();
const auto getNext = [exchange, threadInfo]() {
// Will acquire 'artificalGlobalMutex'. Within getNext() it will be released and
@@ -652,8 +651,7 @@ TEST_F(DocumentSourceExchangeTest, RangeRandomHashExchangeNConsumer) {
TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 0);
+ << "consumers" << 0);
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -663,10 +661,7 @@ TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 2));
+ << "consumers" << 1 << "key" << BSON("a" << 2));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -676,9 +671,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
+ << "consumers" << 1 << "key"
<< BSON("a"
<< "nothash"));
ASSERT_THROWS_CODE(
@@ -690,10 +683,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << true));
+ << "consumers" << 1 << "key" << BSON("a" << true));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -703,10 +693,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("" << 1));
+ << "consumers" << 1 << "key" << BSON("" << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -716,13 +703,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -733,13 +715,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -750,13 +727,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -767,13 +739,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 2
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 2 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0 << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -784,13 +751,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
BSONObj spec = BSON("policy"
<< "roundrobin"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -801,13 +763,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -818,11 +775,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidMissingKeys) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp
index 6974dfc44f2..e429878d7c5 100644
--- a/src/mongo/db/pipeline/document_source_facet.cpp
+++ b/src/mongo/db/pipeline/document_source_facet.cpp
@@ -94,11 +94,8 @@ vector<pair<string, vector<BSONObj>>> extractRawPipelines(const BSONElement& ele
for (auto&& subPipeElem : facetElem.Obj()) {
uassert(40171,
str::stream() << "elements of arrays in $facet spec must be non-empty objects, "
- << facetName
- << " argument contained an element of type "
- << typeName(subPipeElem.type())
- << ": "
- << subPipeElem,
+ << facetName << " argument contained an element of type "
+ << typeName(subPipeElem.type()) << ": " << subPipeElem,
subPipeElem.type() == BSONType::Object);
rawPipeline.push_back(subPipeElem.embeddedObject());
}
@@ -351,8 +348,7 @@ intrusive_ptr<DocumentSource> DocumentSourceFacet::createFromBson(
}
uassert(ErrorCodes::IllegalOperation,
str::stream() << "$facet pipeline '" << *needsMongoS
- << "' must run on mongoS, but '"
- << *needsShard
+ << "' must run on mongoS, but '" << *needsShard
<< "' requires a shard",
!(needsShard && needsMongoS));
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index 9f1091fc0e3..a989f8f389d 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -210,8 +210,7 @@ void DocumentSourceGraphLookUp::doBreadthFirstSearch() {
while (auto next = pipeline->getNext()) {
uassert(40271,
str::stream()
- << "Documents in the '"
- << _from.ns()
+ << "Documents in the '" << _from.ns()
<< "' namespace must contain an _id for de-duplication in $graphLookup",
!(*next)["_id"].missing());
@@ -391,10 +390,8 @@ void DocumentSourceGraphLookUp::serializeToArray(
std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
- << _connectToField.fullPath()
- << "connectFromField"
- << _connectFromField.fullPath()
- << "startWith"
+ << _connectToField.fullPath() << "connectFromField"
+ << _connectFromField.fullPath() << "startWith"
<< _startWith->serialize(false)));
// depthField is optional; serialize it if it was specified.
@@ -413,10 +410,10 @@ void DocumentSourceGraphLookUp::serializeToArray(
// If we are explaining, include an absorbed $unwind inside the $graphLookup specification.
if (_unwind && explain) {
const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
- spec["unwinding"] = Value(DOC("preserveNullAndEmptyArrays"
- << (*_unwind)->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
- << (indexPath ? Value((*indexPath).fullPath()) : Value())));
+ spec["unwinding"] =
+ Value(DOC("preserveNullAndEmptyArrays"
+ << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << (indexPath ? Value((*indexPath).fullPath()) : Value())));
}
array.push_back(Value(DOC(getSourceName() << spec.freeze())));
@@ -549,8 +546,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
argName == "depthField" || argName == "connectToField") {
// All remaining arguments to $graphLookup are expected to be strings.
uassert(40103,
- str::stream() << "expected string as argument for " << argName << ", found: "
- << argument.toString(false, false),
+ str::stream() << "expected string as argument for " << argName
+ << ", found: " << argument.toString(false, false),
argument.type() == String);
}
@@ -566,8 +563,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
depthField = boost::optional<FieldPath>(FieldPath(argument.String()));
} else {
uasserted(40104,
- str::stream() << "Unknown argument to $graphLookup: "
- << argument.fieldName());
+ str::stream()
+ << "Unknown argument to $graphLookup: " << argument.fieldName());
}
}
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
index 0e402da49a1..27b364ca2cd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
@@ -247,10 +247,8 @@ TEST_F(DocumentSourceGraphLookUpTest,
ASSERT(next.isEOF());
} else {
FAIL(str::stream() << "Expected either [ " << to0from1.toString() << " ] or [ "
- << to0from2.toString()
- << " ] but found [ "
- << next.getDocument().toString()
- << " ]");
+ << to0from2.toString() << " ] but found [ "
+ << next.getDocument().toString() << " ]");
}
}
diff --git a/src/mongo/db/pipeline/document_source_group_test.cpp b/src/mongo/db/pipeline/document_source_group_test.cpp
index 8ea0cbc912a..ae1083715b9 100644
--- a/src/mongo/db/pipeline/document_source_group_test.cpp
+++ b/src/mongo/db/pipeline/document_source_group_test.cpp
@@ -215,10 +215,10 @@ TEST_F(DocumentSourceGroupTest, ShouldReportMultipleFieldGroupKeysAsARename) {
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
auto doc = std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>>>{{"x", x},
{"y", y}};
- for (auto & [ unused, expression ] : doc)
+ for (auto& [unused, expression] : doc)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : doc) {
+ for (auto& [fieldName, unused] : doc) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -523,8 +523,9 @@ class AggregateObjectExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first" << BSON("x"
- << "$a")));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first" << BSON("x"
+ << "$a")));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << BSON("x" << 6));
@@ -537,8 +538,9 @@ class AggregateOperatorExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first"
- << "$a"));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first"
+ << "$a"));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << 6);
@@ -635,8 +637,9 @@ class SingleDocument : public CheckResultsBase {
return {DOC("a" << 1)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$sum"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$sum"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:1}]";
@@ -649,8 +652,9 @@ class TwoValuesSingleKey : public CheckResultsBase {
return {DOC("a" << 1), DOC("a" << 2)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$push"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1,2]}]";
@@ -708,8 +712,7 @@ class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
<< "list"
<< BSON("$push"
<< "$a")
- << "sum"
- << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
+ << "sum" << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
}
virtual string expectedResultSetString() {
return "[{_id:0,list:[1,3],sum:2},{_id:1,list:[2,4],sum:3}]";
@@ -770,8 +773,9 @@ class UndefinedAccumulatorValue : public CheckResultsBase {
return {Document()};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "first" << BSON("$first"
- << "$missing"));
+ return BSON("_id" << 0 << "first"
+ << BSON("$first"
+ << "$missing"));
}
virtual string expectedResultSetString() {
return "[{_id:0, first:null}]";
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index 23343699114..c24671624f6 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -85,4 +85,4 @@ Value DocumentSourceIndexStats::serialize(
boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
index b4dd8a61adf..13a0c173424 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
@@ -65,4 +65,4 @@ Value DocumentSourceInternalInhibitOptimization::serialize(
return Value(Document{{getSourceName(), Value{Document{}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
index 86b919fb848..75f3e637a7d 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
@@ -73,4 +73,4 @@ private:
Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
index 3b7eb1f86a8..0eb5a85f0d0 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
@@ -69,14 +69,12 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceInternalSplitPipeline::create
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing mergeType: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing $_internalSplitPipeline: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
}
@@ -120,4 +118,4 @@ Value DocumentSourceInternalSplitPipeline::serialize(
mergeTypeString.empty() ? Value() : Value(mergeTypeString)}}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
index 9d58b7e3fd5..d2d4b14e685 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
@@ -85,4 +85,4 @@ private:
HostTypeRequirement _mergeType = HostTypeRequirement::kNone;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
index 3c189798db9..9195d0aa0aa 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
@@ -51,8 +51,7 @@ DocumentSource::GetNextResult DocumentSourceListCachedAndActiveUsers::getNext()
const auto info = std::move(_users.back());
_users.pop_back();
return Document(BSON("username" << info.userName.getUser() << "db" << info.userName.getDB()
- << "active"
- << info.active));
+ << "active" << info.active));
}
return GetNextResult::makeEOF();
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
index 44d2e57ee8e..d984f755dda 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
@@ -73,8 +73,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
};
diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.h b/src/mongo/db/pipeline/document_source_list_local_sessions.h
index 81cc484d618..0954aaa3e36 100644
--- a/src/mongo/db/pipeline/document_source_list_local_sessions.h
+++ b/src/mongo/db/pipeline/document_source_list_local_sessions.h
@@ -84,8 +84,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index b1c3d9ad950..ef11b582394 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -260,8 +260,7 @@ DocumentSource::GetNextResult DocumentSourceLookUp::getNext() {
objsize += result->getApproximateSize();
uassert(4568,
str::stream() << "Total size of documents in " << _fromNs.coll()
- << " matching pipeline's $lookup stage exceeds "
- << maxBytes
+ << " matching pipeline's $lookup stage exceeds " << maxBytes
<< " bytes",
objsize <= maxBytes);
@@ -686,8 +685,7 @@ void DocumentSourceLookUp::serializeToArray(
const boost::optional<FieldPath> indexPath = _unwindSrc->indexPath();
output[getSourceName()]["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << _unwindSrc->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
+ << _unwindSrc->preserveNullAndEmptyArrays() << "includeArrayIndex"
<< (indexPath ? Value(indexPath->fullPath()) : Value())));
}
@@ -809,8 +807,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
if (argName == "let") {
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argument
- << "' must be an object, is type "
- << argument.type(),
+ << "' must be an object, is type " << argument.type(),
argument.type() == BSONType::Object);
letVariables = argument.Obj();
hasLet = true;
@@ -819,9 +816,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argName << "' must be a string, found "
- << argument
- << ": "
- << argument.type(),
+ << argument << ": " << argument.type(),
argument.type() == BSONType::String);
if (argName == "from") {
diff --git a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
index 327fdf6f703..cb24b7b9ae8 100644
--- a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
@@ -43,14 +43,9 @@ Value assertFieldHasType(const Document& fullDoc, StringData fieldName, BSONType
auto val = fullDoc[fieldName];
uassert(40578,
str::stream() << "failed to look up post image after change: expected \"" << fieldName
- << "\" field to have type "
- << typeName(expectedType)
- << ", instead found type "
- << typeName(val.getType())
- << ": "
- << val.toString()
- << ", full object: "
- << fullDoc.toString(),
+ << "\" field to have type " << typeName(expectedType)
+ << ", instead found type " << typeName(val.getType()) << ": "
+ << val.toString() << ", full object: " << fullDoc.toString(),
val.getType() == expectedType);
return val;
}
@@ -88,8 +83,7 @@ NamespaceString DocumentSourceLookupChangePostImage::assertValidNamespace(
// lookup into any namespace.
uassert(40579,
str::stream() << "unexpected namespace during post image lookup: " << nss.ns()
- << ", expected "
- << pExpCtx->ns.ns(),
+ << ", expected " << pExpCtx->ns.ns(),
nss == pExpCtx->ns ||
(pExpCtx->isClusterAggregation() || pExpCtx->isDBAggregation(nss.db())));
@@ -112,8 +106,7 @@ Value DocumentSourceLookupChangePostImage::lookupPostImage(const Document& updat
const auto readConcern = pExpCtx->inMongos
? boost::optional<BSONObj>(BSON("level"
<< "majority"
- << "afterClusterTime"
- << resumeToken.getData().clusterTime))
+ << "afterClusterTime" << resumeToken.getData().clusterTime))
: boost::none;
diff --git a/src/mongo/db/pipeline/document_source_lookup_test.cpp b/src/mongo/db/pipeline/document_source_lookup_test.cpp
index bc406def88e..e5e354caedc 100644
--- a/src/mongo/db/pipeline/document_source_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_test.cpp
@@ -95,9 +95,7 @@ TEST_F(DocumentSourceLookUpTest, PreservesParentPipelineLetVariables) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -117,9 +115,7 @@ TEST_F(DocumentSourceLookUpTest, AcceptsPipelineSyntax) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -229,17 +225,17 @@ TEST_F(DocumentSourceLookUpTest, RejectLookupWhenDepthLimitIsExceeded) {
expCtx->subPipelineDepth = DocumentSourceLookUp::kMaxSubPipelineDepth;
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- ErrorCodes::MaxSubPipelineDepthExceeded);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ ErrorCodes::MaxSubPipelineDepthExceeded);
}
TEST_F(ReplDocumentSourceLookUpTest, RejectsPipelineWithChangeStreamStage) {
@@ -286,8 +282,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
auto lookupStage = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
<< "localField"
<< "a"
<< "foreignField"
@@ -298,8 +293,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
expCtx);
FAIL(str::stream()
- << "Expected creation of the "
- << lookupStage->getSourceName()
+ << "Expected creation of the " << lookupStage->getSourceName()
<< " stage to uassert on mix of localField/foreignField and pipeline options");
} catch (const AssertionException& ex) {
ASSERT_EQ(ErrorCodes::FailedToParse, ex.code());
@@ -335,50 +329,50 @@ TEST_F(DocumentSourceLookUpTest, RejectsInvalidLetVariableName) {
expCtx->setResolvedNamespaces(StringMap<ExpressionContext::ResolvedNamespace>{
{fromNs.coll().toString(), {fromNs, std::vector<BSONObj>()}}});
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("" // Empty variable name.
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16866);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("^invalidFirstChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16867);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("contains.invalidChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16868);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("" // Empty variable name.
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16866);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("^invalidFirstChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16867);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("contains.invalidChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16868);
}
TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
@@ -393,9 +387,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
<< "let"
<< BSON("local_x"
<< "$x")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -729,8 +721,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheNonCorrelatedSubPipelinePrefix) {
auto expectedPipe = fromjson(
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -914,8 +905,7 @@ TEST_F(DocumentSourceLookUpTest,
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
"{$lookup: {from: 'coll', as: 'subas', let: {var1: '$y'}, "
"pipeline: [{$match: {$expr: { $eq: ['$z', '$$var1']}}}]}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -947,8 +937,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheEntirePipelineIfNonCorrelated) {
<< "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, {$lookup: {from: "
"'coll', as: 'subas', let: {}, pipeline: [{$match: {y: 5}}]}}, {$addFields: "
"{constField: {$const: 5}}}, "
- << sequentialCacheStageObj()
- << "]");
+ << sequentialCacheStageObj() << "]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 1900a644627..c2c7ce66f29 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -48,8 +48,8 @@ namespace mongo {
using boost::intrusive_ptr;
using std::pair;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
REGISTER_DOCUMENT_SOURCE(match,
diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp
index 2550e5a490f..23129c98f85 100644
--- a/src/mongo/db/pipeline/document_source_merge.cpp
+++ b/src/mongo/db/pipeline/document_source_merge.cpp
@@ -82,7 +82,7 @@ constexpr auto kPipelineDiscardMode = MergeMode{WhenMatched::kPipeline, WhenNotM
*/
MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -103,7 +103,7 @@ MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
*/
MergeStrategy makeStrictUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -411,7 +411,7 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::createFromBson(
mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->mode : kDefaultWhenMatched;
auto whenNotMatched = mergeSpec.getWhenNotMatched().value_or(kDefaultWhenNotMatched);
auto pipeline = mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->pipeline : boost::none;
- auto[mergeOnFields, targetCollectionVersion] =
+ auto [mergeOnFields, targetCollectionVersion] =
expCtx->mongoProcessInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, mergeSpec.getOn(), mergeSpec.getTargetCollectionVersion(), targetNss);
@@ -434,7 +434,7 @@ Value DocumentSourceMerge::serialize(boost::optional<ExplainOptions::Verbosity>
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->serialize(static_cast<bool>(explain));
}
return bob.obj();
diff --git a/src/mongo/db/pipeline/document_source_merge.h b/src/mongo/db/pipeline/document_source_merge.h
index 927c0376245..f7889528930 100644
--- a/src/mongo/db/pipeline/document_source_merge.h
+++ b/src/mongo/db/pipeline/document_source_merge.h
@@ -180,7 +180,7 @@ private:
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->evaluate(doc, &pExpCtx->variables);
}
return bob.obj();
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
index e6b26b06c70..3fda91dc77d 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
@@ -132,8 +132,8 @@ TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectEmptyArray) {
TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectLegacySerializationFormats) {
// Formats like this were used in old versions of the server but are no longer supported.
- auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON(
- "ns" << kTestNss.ns() << "id" << 0LL << "host" << kTestHost.toString())));
+ auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON("ns" << kTestNss.ns() << "id" << 0LL
+ << "host" << kTestHost.toString())));
ASSERT_THROWS_CODE(DocumentSourceMergeCursors::createFromBson(spec.firstElement(), getExpCtx()),
AssertionException,
17026);
diff --git a/src/mongo/db/pipeline/document_source_merge_test.cpp b/src/mongo/db/pipeline/document_source_merge_test.cpp
index 50e75e9d264..dbebf226ced 100644
--- a/src/mongo/db/pipeline/document_source_merge_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_test.cpp
@@ -140,8 +140,7 @@ TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfWhenMatchedIsStringOrArray) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSONArray()));
+ << "whenMatched" << BSONArray()));
ASSERT(createMergeStage(spec));
}
@@ -238,14 +237,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfIntoIsNotAValidUserCollection) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << true)));
+ << "db" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << BSONArray())));
+ << "db" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
@@ -259,14 +256,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfCollIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << true)));
+ << "coll" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << BSONArray())));
+ << "coll" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
@@ -294,40 +289,34 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotAValidDatabaseName) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenMatchedModeIsNotStringOrArray) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << true));
+ << "whenMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << 100));
+ << "whenMatched" << 100));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON("" << kDefaultWhenMatchedMode)));
+ << "whenMatched" << BSON("" << kDefaultWhenMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
}
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsNotString) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << true));
+ << "whenNotMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSONArray()));
+ << "whenNotMatched" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSON("" << kDefaultWhenNotMatchedMode)));
+ << "whenNotMatched" << BSON("" << kDefaultWhenNotMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
}
@@ -371,26 +360,22 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsUnsupportedStr
TEST_F(DocumentSourceMergeTest, FailsToParseIfOnFieldIsNotStringOrArrayOfStrings) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << 1));
+ << "on" << 1));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSONArray()));
+ << "on" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51187);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
+ << "on" << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51134);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON("_id" << 1)));
+ << "on" << BSON("_id" << 1)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
}
@@ -646,24 +631,21 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "insert"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "fail"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "discard"));
ASSERT(createMergeStage(spec));
@@ -688,41 +670,33 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
auto let = BSON("foo"
<< "bar");
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "insert"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "fail"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "fail"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "discard"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "discard"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "insert"));
@@ -730,9 +704,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "fail"));
@@ -740,9 +712,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "discard"));
@@ -750,9 +720,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "insert"));
@@ -760,9 +728,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "fail"));
@@ -770,9 +736,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "discard"));
@@ -780,9 +744,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "keepExisting"
<< "whenNotMatched"
<< "insert"));
@@ -790,9 +752,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "fail"
<< "whenNotMatched"
<< "insert"));
@@ -800,12 +760,12 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
}
TEST_F(DocumentSourceMergeTest, SerializeDefaultLetVariable) {
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
auto serialized = mergeStage->serialize().getDocument();
ASSERT_VALUE_EQ(serialized["$merge"]["let"],
@@ -826,11 +786,10 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
<< BSON("v1" << 10 << "v2"
<< "foo"
<< "v3"
- << BSON("x" << 1 << "y" << BSON("z"
- << "bar")))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << BSON("x" << 1 << "y"
+ << BSON("z"
+ << "bar")))
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -840,8 +799,9 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
Value(BSON("$const"
<< "foo")));
ASSERT_VALUE_EQ(serialized["$merge"]["let"]["v3"],
- Value(BSON("x" << BSON("$const" << 1) << "y" << BSON("z" << BSON("$const"
- << "bar")))));
+ Value(BSON("x" << BSON("$const" << 1) << "y"
+ << BSON("z" << BSON("$const"
+ << "bar")))));
ASSERT_VALUE_EQ(serialized["$merge"]["whenMatched"], Value(pipeline));
}
@@ -853,9 +813,7 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
<< "target_collection"
<< "let"
<< BSON("v1" << BSON_ARRAY(1 << "2" << BSON("x" << 1 << "y" << 2)))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -877,14 +835,11 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONNULL
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONNULL << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -897,14 +852,11 @@ TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariables) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONObj()
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONObj() << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -917,11 +869,7 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "1")));
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << 1
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "let" << 1 << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
@@ -929,18 +877,13 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "target_collection"
<< "let"
<< "foo"
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << BSON_ARRAY(1 << "2")
- << "whenMatched"
- << pipeline
+ << "let" << BSON_ARRAY(1 << "2") << "whenMatched" << pipeline
<< "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp
index f4efb3e731e..86e9ebda0ee 100644
--- a/src/mongo/db/pipeline/document_source_mock.cpp
+++ b/src/mongo/db/pipeline/document_source_mock.cpp
@@ -76,4 +76,4 @@ intrusive_ptr<DocumentSourceMock> DocumentSourceMock::createForTest(
}
return new DocumentSourceMock(std::move(results));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index ab340885632..f8478395328 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -106,8 +106,8 @@ void DocumentSourceOut::initialize() {
DBClientBase* conn = pExpCtx->mongoProcessInterface->directClient();
const auto& outputNs = getOutputNs();
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Save the original collection options and index specs so we can check they didn't change
// during computation.
@@ -123,8 +123,8 @@ void DocumentSourceOut::initialize() {
// We will write all results into a temporary collection, then rename the temporary
// collection to be the target collection once we are done.
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Create temp collection, copying options from the existing output collection if any.
{
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
index d95043e66fe..dfa460c3f9f 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
@@ -41,14 +41,14 @@ REGISTER_DOCUMENT_SOURCE(planCacheStats,
boost::intrusive_ptr<DocumentSource> DocumentSourcePlanCacheStats::createFromBson(
BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& pExpCtx) {
- uassert(
- ErrorCodes::FailedToParse,
- str::stream() << kStageName << " value must be an object. Found: " << typeName(spec.type()),
- spec.type() == BSONType::Object);
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << kStageName
+ << " value must be an object. Found: " << typeName(spec.type()),
+ spec.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
- str::stream() << kStageName << " parameters object must be empty. Found: "
- << typeName(spec.type()),
+ str::stream() << kStageName
+ << " parameters object must be empty. Found: " << typeName(spec.type()),
spec.embeddedObject().isEmpty());
uassert(50932,
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
index 3eec42538f2..6980b400972 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
@@ -159,8 +159,7 @@ TEST_F(DocumentSourcePlanCacheStatsTest, ReturnsOnlyMatchingStatsAfterAbsorbingM
<< "baz"),
BSON("foo"
<< "bar"
- << "match"
- << true)};
+ << "match" << true)};
getExpCtx()->mongoProcessInterface =
std::make_shared<PlanCacheStatsMongoProcessInterface>(stats);
diff --git a/src/mongo/db/pipeline/document_source_queue.cpp b/src/mongo/db/pipeline/document_source_queue.cpp
index 80559de1a71..47a77709363 100644
--- a/src/mongo/db/pipeline/document_source_queue.cpp
+++ b/src/mongo/db/pipeline/document_source_queue.cpp
@@ -55,4 +55,4 @@ DocumentSource::GetNextResult DocumentSourceQueue::getNext() {
_queue.pop_front();
return next;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 3ff60410a95..7afc1eea75a 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -161,8 +161,7 @@ boost::optional<Document> DocumentSourceRedact::redactObject(const Document& roo
uasserted(17053,
str::stream() << "$redact's expression should not return anything "
<< "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned "
- << expressionResult.toString());
+ << "$$PRUNE, but returned " << expressionResult.toString());
}
}
@@ -196,4 +195,4 @@ intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
return source;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_replace_root.cpp b/src/mongo/db/pipeline/document_source_replace_root.cpp
index deefe509bb7..0d144bdc368 100644
--- a/src/mongo/db/pipeline/document_source_replace_root.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root.cpp
@@ -50,11 +50,8 @@ Document ReplaceRootTransformation::applyTransformation(const Document& input) {
uassert(40228,
str::stream()
<< "'newRoot' expression must evaluate to an object, but resulting value was: "
- << newRoot.toString()
- << ". Type of resulting value: '"
- << typeName(newRoot.getType())
- << "'. Input document: "
- << input.toString(),
+ << newRoot.toString() << ". Type of resulting value: '"
+ << typeName(newRoot.getType()) << "'. Input document: " << input.toString(),
newRoot.getType() == BSONType::Object);
// Turn the value into a document.
@@ -84,8 +81,7 @@ intrusive_ptr<DocumentSource> DocumentSourceReplaceRoot::createFromBson(
<< stageName);
uassert(40229,
str::stream() << "expected an object as specification for " << kStageName
- << " stage, got "
- << typeName(elem.type()),
+ << " stage, got " << typeName(elem.type()),
elem.type() == Object);
auto spec =
diff --git a/src/mongo/db/pipeline/document_source_replace_root_test.cpp b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
index 71c356e98f2..cb71448fa7b 100644
--- a/src/mongo/db/pipeline/document_source_replace_root_test.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
@@ -336,14 +336,12 @@ TEST_F(ReplaceRootSpec, CreationRequiresObjectSpecification) {
TEST_F(ReplaceRootSpec, OnlyValidOptionInObjectSpecIsNewRoot) {
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "root"
- << 2))),
+ << "root" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "path"
- << 2))),
+ << "path" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("path"
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index ad84c24e9aa..0f82e7466de 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -116,9 +116,7 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
<< _idField
<< " field in order to de-duplicate results, but encountered a "
"document without a "
- << _idField
- << " field: "
- << nextInput.getDocument().toString(),
+ << _idField << " field: " << nextInput.getDocument().toString(),
!idField.missing());
if (_seenDocs.insert(std::move(idField)).second) {
@@ -163,4 +161,4 @@ intrusive_ptr<DocumentSourceSampleFromRandomCursor> DocumentSourceSampleFromRand
new DocumentSourceSampleFromRandomCursor(expCtx, size, idField, nDocsInCollection));
return source;
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
index 61814540336..15627744247 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
@@ -144,12 +144,12 @@ Value DocumentSourceSequentialDocumentCache::serialize(
{kStageName,
Document{{"maxSizeBytes"_sd, Value(static_cast<long long>(_cache->maxSizeBytes()))},
{"status"_sd,
- _cache->isBuilding() ? "kBuilding"_sd : _cache->isServing()
- ? "kServing"_sd
- : "kAbandoned"_sd}}}});
+ _cache->isBuilding()
+ ? "kBuilding"_sd
+ : _cache->isServing() ? "kServing"_sd : "kAbandoned"_sd}}}});
}
return Value();
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.h b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
index 19119a1a0f3..0031ca8694b 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.h
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
@@ -99,4 +99,4 @@ private:
bool _hasOptimizedPos = false;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 2eead90aa3f..143a796cdf6 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -116,4 +116,4 @@ intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
return DocumentSourceSkip::create(pExpCtx, nToSkip);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 576541c207b..870394a277c 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -286,4 +286,4 @@ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
string pathString(Expression::removeFieldPrefix(prefixedPathString));
return DocumentSourceUnwind::create(pExpCtx, pathString, preserveNullAndEmptyArrays, indexPath);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind_test.cpp b/src/mongo/db/pipeline/document_source_unwind_test.cpp
index ec8bb3af24c..2c024fb6f5f 100644
--- a/src/mongo/db/pipeline/document_source_unwind_test.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind_test.cpp
@@ -164,8 +164,7 @@ private:
void createUnwind(bool preserveNullAndEmptyArrays, bool includeArrayIndex) {
auto specObj =
DOC("$unwind" << DOC("path" << unwindFieldPath() << "preserveNullAndEmptyArrays"
- << preserveNullAndEmptyArrays
- << "includeArrayIndex"
+ << preserveNullAndEmptyArrays << "includeArrayIndex"
<< (includeArrayIndex ? Value(indexPath()) : Value())));
_unwind = static_cast<DocumentSourceUnwind*>(
DocumentSourceUnwind::createFromBson(specObj.toBson().firstElement(), ctx()).get());
@@ -475,8 +474,9 @@ class SeveralMoreDocuments : public CheckResultsBase {
deque<DocumentSource::GetNextResult> inputData() override {
return {DOC("_id" << 0 << "a" << BSONNULL),
DOC("_id" << 1),
- DOC("_id" << 2 << "a" << DOC_ARRAY("a"_sd
- << "b"_sd)),
+ DOC("_id" << 2 << "a"
+ << DOC_ARRAY("a"_sd
+ << "b"_sd)),
DOC("_id" << 3),
DOC("_id" << 4 << "a" << DOC_ARRAY(1 << 2 << 3)),
DOC("_id" << 5 << "a" << DOC_ARRAY(4 << 5 << 6)),
@@ -764,8 +764,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonDollarPrefixedPath) {
TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << 2))),
+ << "preserveNullAndEmptyArrays" << 2))),
AssertionException,
28809);
}
@@ -773,8 +772,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
TEST_F(UnwindStageTest, ShouldRejectNonStringIncludeArrayIndex) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "includeArrayIndex"
- << 2))),
+ << "includeArrayIndex" << 2))),
AssertionException,
28810);
}
@@ -806,16 +804,13 @@ TEST_F(UnwindStageTest, ShoudlRejectDollarPrefixedIncludeArrayIndex) {
TEST_F(UnwindStageTest, ShouldRejectUnrecognizedOption) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true
- << "foo"
- << 3))),
+ << "preserveNullAndEmptyArrays" << true
+ << "foo" << 3))),
AssertionException,
28811);
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "foo"
- << 3))),
+ << "foo" << 3))),
AssertionException,
28811);
}
diff --git a/src/mongo/db/pipeline/document_source_writer.h b/src/mongo/db/pipeline/document_source_writer.h
index fd10532d469..ada2fc72a53 100644
--- a/src/mongo/db/pipeline/document_source_writer.h
+++ b/src/mongo/db/pipeline/document_source_writer.h
@@ -193,7 +193,7 @@ DocumentSource::GetNextResult DocumentSourceWriter<B>::getNext() {
waitWhileFailPointEnabled();
auto doc = nextInput.releaseDocument();
- auto[obj, objSize] = makeBatchObject(std::move(doc));
+ auto [obj, objSize] = makeBatchObject(std::move(doc));
bufferedBytes += objSize;
if (!batch.empty() &&
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index b965b86f244..e8e8ce3f0e3 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -112,7 +112,7 @@ struct ParserRegistration {
};
StringMap<ParserRegistration> parserMap;
-}
+} // namespace
void Expression::registerExpression(
string key,
@@ -145,17 +145,16 @@ intrusive_ptr<Expression> Expression::parseExpression(
// Make sure we are allowed to use this expression under the current feature compatibility
// version.
auto& entry = it->second;
- uassert(
- ErrorCodes::QueryFeatureNotAllowed,
- // TODO SERVER-31968 we would like to include the current version and the required minimum
- // version in this error message, but using FeatureCompatibilityVersion::toString() would
- // introduce a dependency cycle.
- str::stream() << opName
- << " is not allowed in the current feature compatibility version. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << " for more information.",
- !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
- (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
+ uassert(ErrorCodes::QueryFeatureNotAllowed,
+ // TODO SERVER-31968 we would like to include the current version and the required
+ // minimum version in this error message, but using
+ // FeatureCompatibilityVersion::toString() would introduce a dependency cycle.
+ str::stream() << opName
+ << " is not allowed in the current feature compatibility version. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink
+ << " for more information.",
+ !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
+ (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
return entry.parser(expCtx, obj.firstElement(), vps);
}
@@ -522,13 +521,11 @@ Value ExpressionArrayElemAt::evaluate(const Document& root, Variables* variables
array.isArray());
uassert(28690,
str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is "
- << typeName(indexArg.getType()),
+ << " but is " << typeName(indexArg.getType()),
indexArg.numeric());
uassert(28691,
str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: "
- << indexArg.coerceToDouble(),
+ << " a 32-bit integer: " << indexArg.coerceToDouble(),
indexArg.integral());
long long i = indexArg.coerceToLong();
@@ -808,7 +805,7 @@ static const CmpLookup cmpLookup[7] = {
// CMP is special. Only name is used.
/* CMP */ {{false, false, false}, ExpressionCompare::CMP, "$cmp"},
};
-}
+} // namespace
Value ExpressionCompare::evaluate(const Document& root, Variables* variables) const {
Value pLeft(_children[0]->evaluate(root, variables));
@@ -1063,8 +1060,8 @@ intrusive_ptr<Expression> ExpressionDateFromParts::parse(
timeZoneElem = arg;
} else {
uasserted(40518,
- str::stream() << "Unrecognized argument to $dateFromParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromParts: " << arg.fieldName());
}
}
@@ -1222,8 +1219,7 @@ bool ExpressionDateFromParts::evaluateNumberWithDefault(const Document& root,
uassert(40515,
str::stream() << "'" << fieldName << "' must evaluate to an integer, found "
- << typeName(fieldValue.getType())
- << " with value "
+ << typeName(fieldValue.getType()) << " with value "
<< fieldValue.toString(),
fieldValue.integral64Bit());
@@ -1241,17 +1237,12 @@ bool ExpressionDateFromParts::evaluateNumberWithDefaultAndBounds(const Document&
bool result =
evaluateNumberWithDefault(root, field, fieldName, defaultValue, returnValue, variables);
- uassert(31034,
- str::stream() << "'" << fieldName << "'"
- << " must evaluate to a value in the range ["
- << kMinValueForDatePart
- << ", "
- << kMaxValueForDatePart
- << "]; value "
- << *returnValue
- << " is not in range",
- !result ||
- (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
+ uassert(
+ 31034,
+ str::stream() << "'" << fieldName << "'"
+ << " must evaluate to a value in the range [" << kMinValueForDatePart << ", "
+ << kMaxValueForDatePart << "]; value " << *returnValue << " is not in range",
+ !result || (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
return result;
}
@@ -1289,9 +1280,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(40523,
str::stream() << "'year' must evaluate to an integer in the range " << 0 << " to "
- << 9999
- << ", found "
- << year,
+ << 9999 << ", found " << year,
year >= 0 && year <= 9999);
return Value(
@@ -1313,10 +1302,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(31095,
str::stream() << "'isoWeekYear' must evaluate to an integer in the range " << 0
- << " to "
- << 9999
- << ", found "
- << isoWeekYear,
+ << " to " << 9999 << ", found " << isoWeekYear,
isoWeekYear >= 0 && isoWeekYear <= 9999);
return Value(timeZone->createFromIso8601DateParts(
@@ -1393,8 +1379,8 @@ intrusive_ptr<Expression> ExpressionDateFromString::parse(
onErrorElem = arg;
} else {
uasserted(40541,
- str::stream() << "Unrecognized argument to $dateFromString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromString: " << arg.fieldName());
}
}
@@ -1476,8 +1462,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
if (!formatValue.nullish()) {
uassert(40684,
str::stream() << "$dateFromString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1498,8 +1483,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
try {
uassert(ErrorCodes::ConversionFailure,
str::stream() << "$dateFromString requires that 'dateString' be a string, found: "
- << typeName(dateString.getType())
- << " with value "
+ << typeName(dateString.getType()) << " with value "
<< dateString.toString(),
dateString.getType() == BSONType::String);
@@ -1575,8 +1559,8 @@ intrusive_ptr<Expression> ExpressionDateToParts::parse(
isoDateElem = arg;
} else {
uasserted(40520,
- str::stream() << "Unrecognized argument to $dateToParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToParts: " << arg.fieldName());
}
}
@@ -1723,8 +1707,8 @@ intrusive_ptr<Expression> ExpressionDateToString::parse(
onNullElem = arg;
} else {
uasserted(18534,
- str::stream() << "Unrecognized argument to $dateToString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToString: " << arg.fieldName());
}
}
@@ -1794,8 +1778,7 @@ Value ExpressionDateToString::evaluate(const Document& root, Variables* variable
if (!formatValue.nullish()) {
uassert(18533,
str::stream() << "$dateToString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1869,9 +1852,7 @@ Value ExpressionDivide::evaluate(const Document& root, Variables* variables) con
} else {
uasserted(16609,
str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2028,8 +2009,7 @@ intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::parse(
if (varId == Variables::kNowId || varId == Variables::kClusterTimeId) {
uassert(ErrorCodes::QueryFeatureNotAllowed,
str::stream()
- << "'$$"
- << varName
+ << "'$$" << varName
<< "' is not allowed in the current feature compatibility version. See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< " for more information.",
@@ -2248,9 +2228,8 @@ intrusive_ptr<Expression> ExpressionFilter::optimize() {
}
Value ExpressionFilter::serialize(bool explain) const {
- return Value(
- DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName << "cond"
- << _filter->serialize(explain))));
+ return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
+ << "cond" << _filter->serialize(explain))));
}
Value ExpressionFilter::evaluate(const Document& root, Variables* variables) const {
@@ -2667,9 +2646,7 @@ Value ExpressionMod::evaluate(const Document& root, Variables* variables) const
} else {
uasserted(16611,
str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2789,15 +2766,12 @@ void uassertIfNotIntegralAndNonNegative(Value val,
StringData argumentName) {
uassert(40096,
str::stream() << expressionName << "requires an integral " << argumentName
- << ", found a value of type: "
- << typeName(val.getType())
- << ", with value: "
- << val.toString(),
+ << ", found a value of type: " << typeName(val.getType())
+ << ", with value: " << val.toString(),
val.integral());
uassert(40097,
str::stream() << expressionName << " requires a nonnegative " << argumentName
- << ", found: "
- << val.toString(),
+ << ", found: " << val.toString(),
val.coerceToInt() >= 0);
}
@@ -2907,8 +2881,7 @@ intrusive_ptr<Expression> ExpressionIndexOfArray::optimize() {
}
uassert(50809,
str::stream() << "First operand of $indexOfArray must be an array. First "
- << "argument is of type: "
- << typeName(valueArray.getType()),
+ << "argument is of type: " << typeName(valueArray.getType()),
valueArray.isArray());
auto arr = valueArray.getArray();
@@ -3464,7 +3437,7 @@ bool representableAsLong(long long base, long long exp) {
return base >= kBaseLimits[exp].min && base <= kBaseLimits[exp].max;
};
-}
+} // namespace
/* ----------------------- ExpressionPow ---------------------------- */
@@ -3778,7 +3751,7 @@ ValueSet arrayToSet(const Value& val, const ValueComparator& valueComparator) {
valueSet.insert(array.begin(), array.end());
return valueSet;
}
-}
+} // namespace
/* ----------------------- ExpressionSetDifference ---------------------------- */
@@ -3792,13 +3765,11 @@ Value ExpressionSetDifference::evaluate(const Document& root, Variables* variabl
uassert(17048,
str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17049,
str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
ValueSet rhsSet = arrayToSet(rhs, getExpressionContext()->getValueComparator());
@@ -3837,8 +3808,7 @@ Value ExpressionSetEquals::evaluate(const Document& root, Variables* variables)
const Value nextEntry = _children[i]->evaluate(root, variables);
uassert(17044,
str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3876,8 +3846,7 @@ Value ExpressionSetIntersection::evaluate(const Document& root, Variables* varia
}
uassert(17047,
str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3924,7 +3893,7 @@ Value setIsSubsetHelper(const vector<Value>& lhs, const ValueSet& rhs) {
}
return Value(true);
}
-}
+} // namespace
Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables) const {
const Value lhs = _children[0]->evaluate(root, variables);
@@ -3932,13 +3901,11 @@ Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables
uassert(17046,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17042,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
return setIsSubsetHelper(lhs.getArray(),
@@ -3966,8 +3933,7 @@ public:
uassert(17310,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
@@ -3989,8 +3955,7 @@ intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
const Value rhs = ec->getValue();
uassert(17311,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
intrusive_ptr<Expression> optimizedWithConstant(
@@ -4019,8 +3984,7 @@ Value ExpressionSetUnion::evaluate(const Document& root, Variables* variables) c
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: "
- << typeName(newEntries.getType()),
+ << " is of type: " << typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
@@ -4060,18 +4024,15 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28724,
str::stream() << "First argument to $slice must be an array, but is"
- << " of type: "
- << typeName(arrayVal.getType()),
+ << " of type: " << typeName(arrayVal.getType()),
arrayVal.isArray());
uassert(28725,
str::stream() << "Second argument to $slice must be a numeric value,"
- << " but is of type: "
- << typeName(arg2.getType()),
+ << " but is of type: " << typeName(arg2.getType()),
arg2.numeric());
uassert(28726,
str::stream() << "Second argument to $slice can't be represented as"
- << " a 32-bit integer: "
- << arg2.coerceToDouble(),
+ << " a 32-bit integer: " << arg2.coerceToDouble(),
arg2.integral());
const auto& array = arrayVal.getArray();
@@ -4111,13 +4072,11 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28727,
str::stream() << "Third argument to $slice must be numeric, but "
- << "is of type: "
- << typeName(countVal.getType()),
+ << "is of type: " << typeName(countVal.getType()),
countVal.numeric());
uassert(28728,
str::stream() << "Third argument to $slice can't be represented"
- << " as a 32-bit integer: "
- << countVal.coerceToDouble(),
+ << " as a 32-bit integer: " << countVal.coerceToDouble(),
countVal.integral());
uassert(28729,
str::stream() << "Third argument to $slice must be positive: "
@@ -4266,23 +4225,20 @@ Value ExpressionSubstrBytes::evaluate(const Document& root, Variables* variables
uassert(16034,
str::stream() << getOpName()
<< ": starting index must be a numeric type (is BSON type "
- << typeName(pLower.getType())
- << ")",
+ << typeName(pLower.getType()) << ")",
(pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
pLower.getType() == NumberDouble));
uassert(16035,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(pLength.getType())
- << ")",
+ << typeName(pLength.getType()) << ")",
(pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
pLength.getType() == NumberDouble));
const long long signedLower = pLower.coerceToLong();
uassert(50752,
- str::stream() << getOpName() << ": starting index must be non-negative (got: "
- << signedLower
- << ")",
+ str::stream() << getOpName()
+ << ": starting index must be non-negative (got: " << signedLower << ")",
signedLower >= 0);
const string::size_type lower = static_cast<string::size_type>(signedLower);
@@ -4330,8 +4286,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
std::string str = inputVal.coerceToString();
uassert(34450,
str::stream() << getOpName() << ": starting index must be a numeric type (is BSON type "
- << typeName(lowerVal.getType())
- << ")",
+ << typeName(lowerVal.getType()) << ")",
lowerVal.numeric());
uassert(34451,
str::stream() << getOpName()
@@ -4340,8 +4295,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
lowerVal.integral());
uassert(34452,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(lengthVal.getType())
- << ")",
+ << typeName(lengthVal.getType()) << ")",
lengthVal.numeric());
uassert(34453,
str::stream() << getOpName()
@@ -4476,8 +4430,8 @@ Value ExpressionSubtract::evaluate(const Document& root, Variables* variables) c
return Value(lhs.getDate() - Milliseconds(rhs.coerceToLong()));
} else {
uasserted(16613,
- str::stream() << "cant $subtract a " << typeName(rhs.getType())
- << " from a Date");
+ str::stream()
+ << "cant $subtract a " << typeName(rhs.getType()) << " from a Date");
}
} else {
uasserted(16556,
@@ -4603,7 +4557,7 @@ boost::intrusive_ptr<Expression> ExpressionSwitch::optimize() {
_default = _default->optimize();
}
- for (auto && [ switchCase, switchThen ] : _branches) {
+ for (auto&& [switchCase, switchThen] : _branches) {
switchCase = switchCase->optimize();
switchThen = switchThen->optimize();
}
@@ -4760,8 +4714,7 @@ std::vector<StringData> extractCodePointsFromChars(StringData utf8String,
}
uassert(50697,
str::stream()
- << "Failed to parse \"chars\" argument to "
- << expressionName
+ << "Failed to parse \"chars\" argument to " << expressionName
<< ": Detected invalid UTF-8. Missing expected continuation byte at end of string.",
i <= utf8String.size());
return codePoints;
@@ -4775,10 +4728,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50699,
str::stream() << _name << " requires its input to be a string, got "
- << unvalidatedInput.toString()
- << " (of type "
- << typeName(unvalidatedInput.getType())
- << ") instead.",
+ << unvalidatedInput.toString() << " (of type "
+ << typeName(unvalidatedInput.getType()) << ") instead.",
unvalidatedInput.getType() == BSONType::String);
const StringData input(unvalidatedInput.getStringData());
@@ -4791,10 +4742,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50700,
str::stream() << _name << " requires 'chars' to be a string, got "
- << unvalidatedUserChars.toString()
- << " (of type "
- << typeName(unvalidatedUserChars.getType())
- << ") instead.",
+ << unvalidatedUserChars.toString() << " (of type "
+ << typeName(unvalidatedUserChars.getType()) << ") instead.",
unvalidatedUserChars.getType() == BSONType::String);
return Value(
@@ -4893,11 +4842,8 @@ void assertFlagsValid(uint32_t flags,
long long precisionValue) {
uassert(51080,
str::stream() << "invalid conversion from Decimal128 result in " << opName
- << " resulting from arguments: ["
- << numericValue
- << ", "
- << precisionValue
- << "]",
+ << " resulting from arguments: [" << numericValue << ", "
+ << precisionValue << "]",
!Decimal128::hasFlag(flags, Decimal128::kInvalid));
}
@@ -4930,8 +4876,7 @@ static Value evaluateRoundOrTrunc(const Document& root,
precisionArg.integral());
uassert(51083,
str::stream() << "cannot apply " << opName << " with precision value "
- << precisionValue
- << " value must be in [-20, 100]",
+ << precisionValue << " value must be in [-20, 100]",
minPrecision <= precisionValue && precisionValue <= maxPrecision);
}
@@ -5200,8 +5145,7 @@ Value ExpressionZip::serialize(bool explain) const {
}
return Value(DOC("$zip" << DOC("inputs" << Value(serializedInput) << "defaults"
- << Value(serializedDefaults)
- << "useLongestLength"
+ << Value(serializedDefaults) << "useLongestLength"
<< serializedUseLongestLength)));
}
@@ -5236,9 +5180,10 @@ public:
//
table[BSONType::NumberDouble][BSONType::NumberDouble] = &performIdentityConversion;
table[BSONType::NumberDouble][BSONType::String] = &performFormatDouble;
- table[BSONType::NumberDouble]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDouble][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDouble][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDouble][BSONType::NumberInt] = &performCastDoubleToInt;
table[BSONType::NumberDouble][BSONType::NumberLong] = &performCastDoubleToLong;
@@ -5254,11 +5199,11 @@ public:
table[BSONType::String][BSONType::String] = &performIdentityConversion;
table[BSONType::String][BSONType::jstOID] = &parseStringToOID;
table[BSONType::String][BSONType::Bool] = &performConvertToTrue;
- table[BSONType::String][BSONType::Date] = [](
- const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
- return Value(expCtx->timeZoneDatabase->fromString(inputValue.getStringData(),
- mongo::TimeZoneDatabase::utcZone()));
- };
+ table[BSONType::String][BSONType::Date] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(expCtx->timeZoneDatabase->fromString(
+ inputValue.getStringData(), mongo::TimeZoneDatabase::utcZone()));
+ };
table[BSONType::String][BSONType::NumberInt] = &parseStringToNumber<int, 10>;
table[BSONType::String][BSONType::NumberLong] = &parseStringToNumber<long long, 10>;
table[BSONType::String][BSONType::NumberDecimal] = &parseStringToNumber<Decimal128, 0>;
@@ -5315,9 +5260,10 @@ public:
inputValue.getDate());
return Value(dateString);
};
- table[BSONType::Date]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::Date][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::Date][BSONType::Date] = &performIdentityConversion;
table[BSONType::Date][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5340,9 +5286,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getInt()));
};
- table[BSONType::NumberInt]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberInt][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberInt][BSONType::NumberInt] = &performIdentityConversion;
table[BSONType::NumberInt][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5364,9 +5311,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getLong()));
};
- table[BSONType::NumberLong]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberLong][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberLong][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberLong][BSONType::NumberInt] = &performCastLongToInt;
table[BSONType::NumberLong][BSONType::NumberLong] = &performIdentityConversion;
@@ -5383,9 +5331,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(inputValue.getDecimal().toString());
};
- table[BSONType::NumberDecimal]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDecimal][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDecimal][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDecimal][BSONType::NumberInt] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5432,8 +5381,7 @@ public:
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Unsupported conversion from " << typeName(inputType) << " to "
- << typeName(targetType)
- << " in $convert with no onError value",
+ << typeName(targetType) << " in $convert with no onError value",
foundFunction);
return foundFunction;
}
@@ -5607,8 +5555,7 @@ private:
Status parseStatus = parseNumberFromStringWithBase(stringValue, base, &result);
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse number '" << stringValue
- << "' in $convert with no onError value: "
- << parseStatus.reason(),
+ << "' in $convert with no onError value: " << parseStatus.reason(),
parseStatus.isOK());
return Value(result);
@@ -5623,8 +5570,7 @@ private:
// and returned.
uasserted(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse objectId '" << inputValue.getString()
- << "' in $convert with no onError value: "
- << ex.reason());
+ << "' in $convert with no onError value: " << ex.reason());
}
}
@@ -5643,7 +5589,6 @@ Expression::Parser makeConversionAlias(const StringData shortcutName, BSONType t
return [=](const intrusive_ptr<ExpressionContext>& expCtx,
BSONElement elem,
const VariablesParseState& vps) -> intrusive_ptr<Expression> {
-
// Use parseArguments to allow for a singleton array, or the unwrapped version.
auto operands = ExpressionNary::parseArguments(expCtx, elem, vps);
@@ -5718,8 +5663,8 @@ intrusive_ptr<Expression> ExpressionConvert::parse(
onNull = parseOperand(expCtx, elem, vps);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "$convert found an unknown argument: "
- << elem.fieldNameStringData());
+ str::stream()
+ << "$convert found an unknown argument: " << elem.fieldNameStringData());
}
}
@@ -5845,8 +5790,8 @@ auto CommonRegexParse(const boost::intrusive_ptr<ExpressionContext>& expCtx,
const VariablesParseState& vpsIn,
StringData opName) {
uassert(51103,
- str::stream() << opName << " expects an object of named arguments but found: "
- << expr.type(),
+ str::stream() << opName
+ << " expects an object of named arguments but found: " << expr.type(),
expr.type() == BSONType::Object);
struct {
@@ -5918,8 +5863,7 @@ int ExpressionRegex::execute(RegexExecutionState* regexState) const {
// capacity is not sufficient to hold all the results. The latter scenario should never occur.
uassert(51156,
str::stream() << "Error occurred while executing the regular expression in " << _opName
- << ". Result code: "
- << execResult,
+ << ". Result code: " << execResult,
execResult == -1 || execResult == (regexState->numCaptures + 1));
return execResult;
}
@@ -6109,7 +6053,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFind::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFind"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFind(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6133,7 +6077,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFindAll::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFindAll"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFindAll(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6197,7 +6141,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexMatch::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexMatch"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexMatch(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
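
The table[BSONType::X][BSONType::Y] = ... hunks above populate a
two-dimensional dispatch table: $convert looks up the conversion function for
an (input type, target type) pair and fails when no entry exists. A minimal
standalone sketch of that shape, with stand-in types:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    // Stand-ins for BSONType and Value, just to show the table's shape.
    enum class Type { Double, String, Bool };
    using Value = std::string;
    using ConversionFunc = std::function<Value(const Value&)>;

    int main() {
        // Dispatch table keyed by (input type, target type), mirroring the
        // table[BSONType::X][BSONType::Y] assignments in the hunks above.
        std::map<Type, std::map<Type, ConversionFunc>> table;
        table[Type::Double][Type::String] = [](const Value& v) { return v; };
        table[Type::Double][Type::Bool] = [](const Value& v) {
            return Value(v == "0" ? "false" : "true");
        };

        auto& fn = table[Type::Double][Type::Bool];
        if (!fn)  // cf. the "Unsupported conversion" uassert above
            throw std::runtime_error("unsupported conversion");
        std::cout << fn("3.5") << "\n";  // prints "true"
    }
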
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 0de82b2947b..02f7858745b 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -369,10 +369,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(28667,
str::stream() << "Expression " << this->getOpName() << " takes at least " << MinArgs
- << " arguments, and at most "
- << MaxArgs
- << ", but "
- << args.size()
+ << " arguments, and at most " << MaxArgs << ", but " << args.size()
<< " were passed in.",
MinArgs <= args.size() && args.size() <= MaxArgs);
}
@@ -388,9 +385,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(16020,
str::stream() << "Expression " << this->getOpName() << " takes exactly " << NArgs
- << " arguments. "
- << args.size()
- << " were passed in.",
+ << " arguments. " << args.size() << " were passed in.",
args.size() == NArgs);
}
};
@@ -613,9 +608,7 @@ public:
uassert(40533,
str::stream() << _opName
<< " requires a string for the timezone argument, but was given a "
- << typeName(timeZoneId.getType())
- << " ("
- << timeZoneId.toString()
+ << typeName(timeZoneId.getType()) << " (" << timeZoneId.toString()
<< ")",
timeZoneId.getType() == BSONType::String);
@@ -676,13 +669,12 @@ public:
} else {
uasserted(40535,
str::stream() << "unrecognized option to " << opName << ": \""
- << argName
- << "\"");
+ << argName << "\"");
}
}
uassert(40539,
- str::stream() << "missing 'date' argument to " << opName << ", provided: "
- << operatorElem,
+ str::stream() << "missing 'date' argument to " << opName
+ << ", provided: " << operatorElem,
date);
return new SubClass(expCtx, std::move(date), std::move(timeZone));
}
@@ -2718,4 +2710,4 @@ public:
using ExpressionRegex::ExpressionRegex;
};
-}
+} // namespace mongo
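
Most hunks in expression.h and expression.cpp reflow uassert(code,
str::stream() << ..., condition) calls: the stream builds a diagnostic message
and uassert throws with the numeric error code when the condition is false. A
rough standalone analogue of that pattern (the real helpers live elsewhere in
the server codebase; this sketch only shows the shape):

    #include <cstddef>
    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Rough stand-in for mongo's uassert; illustrative only. The real macro
    // also avoids building the message unless the assertion fails.
    void uassert(int code, const std::string& msg, bool cond) {
        if (!cond)
            throw std::runtime_error("Location" + std::to_string(code) + ": " + msg);
    }

    int main() {
        std::size_t nargs = 3, minArgs = 1, maxArgs = 2;
        try {
            // Same shape as the validateArguments hunk above.
            std::ostringstream ss;
            ss << "Expression $example takes at least " << minArgs
               << " arguments, and at most " << maxArgs << ", but " << nargs
               << " were passed in.";
            uassert(28667, ss.str(), minArgs <= nargs && nargs <= maxArgs);
        } catch (const std::exception& ex) {
            std::cout << ex.what() << "\n";
        }
    }
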
diff --git a/src/mongo/db/pipeline/expression_convert_test.cpp b/src/mongo/db/pipeline/expression_convert_test.cpp
index a0a5c2d4a64..25e18ed4b2f 100644
--- a/src/mongo/db/pipeline/expression_convert_test.cpp
+++ b/src/mongo/db/pipeline/expression_convert_test.cpp
@@ -80,8 +80,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnError) {
<< "$path1"
<< "to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -100,8 +99,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnNull) {
<< "$path1"
<< "to"
<< "int"
- << "onNull"
- << 0));
+ << "onNull" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -118,8 +116,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutInputFailsToParse) {
auto spec = BSON("$convert" << BSON("to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -134,8 +131,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutToFailsToParse) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -152,8 +148,7 @@ TEST_F(ExpressionConvertTest, InvalidTypeNameFails) {
<< "$path1"
<< "to"
<< "dinosaur"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -170,10 +165,7 @@ TEST_F(ExpressionConvertTest, NonIntegralTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 3.6
- << "onError"
- << 0));
+ << "to" << 3.6 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -195,8 +187,7 @@ TEST_F(ExpressionConvertTest, NonStringNonNumericalTypeFails) {
<< "to"
<< BSON("dinosaur"
<< "Tyrannosaurus rex")
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -215,10 +206,7 @@ TEST_F(ExpressionConvertTest, InvalidNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 100
- << "onError"
- << 0));
+ << "to" << 100 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -238,10 +226,7 @@ TEST_F(ExpressionConvertTest, NegativeNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << -2
- << "onError"
- << 0));
+ << "to" << -2 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -297,8 +282,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)));
+ << "to" << Value(targetTypeName)));
Document input{{"path1", inputValue}};
@@ -320,9 +304,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)
- << "onError"
+ << "to" << Value(targetTypeName) << "onError"
<< "X"));
Document input{{"path1", inputValue}};
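
For orientation, the builder chains in these tests spell out ordinary $convert
aggregation documents; when a conversion fails, the onError value is returned
instead of throwing. A hand-translated shell/JSON form of the
ParseAndSerializeWithOnError spec above (sketch only):

    #include <iostream>

    int main() {
        // JSON equivalent of BSON("$convert" << BSON("input" << "$path1"
        // << "to" << "int" << "onError" << 0)) from the hunk above.
        const char* kOnErrorSpec =
            R"({ "$convert": { "input": "$path1", "to": "int", "onError": 0 } })";
        std::cout << kOnErrorSpec << "\n";
    }
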
diff --git a/src/mongo/db/pipeline/expression_date_test.cpp b/src/mongo/db/pipeline/expression_date_test.cpp
index 67e798d17af..49099b47b36 100644
--- a/src/mongo/db/pipeline/expression_date_test.cpp
+++ b/src/mongo/db/pipeline/expression_date_test.cpp
@@ -46,14 +46,10 @@ TEST_F(ExpressionDateFromPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec =
- BSON("$dateFromParts" << BSON(
- "year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14 << "minute" << 37
- << "second"
- << 15
- << "millisecond"
- << 414
- << "timezone"
- << "America/Los_Angeles"));
+ BSON("$dateFromParts" << BSON("year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14
+ << "minute" << 37 << "second" << 15 << "millisecond"
+ << 414 << "timezone"
+ << "America/Los_Angeles"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateFromParts",
@@ -84,16 +80,15 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it becomes a constant if both year, hour and minute are provided, and are both
// expressions which evaluate to constants.
spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "hour"
- << BSON("$add" << BSON_ARRAY(13 << 1))
- << "minute"
+ << BSON("$add" << BSON_ARRAY(13 << 1)) << "minute"
<< BSON("$add" << BSON_ARRAY(40 << 3))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
// Test that it becomes a constant if both year and milliseconds are provided, and year is an
// expressions which evaluate to a constant, with milliseconds a constant
- spec = BSON("$dateFromParts" << BSON(
- "year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "millisecond" << 514));
+ spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107))
+ << "millisecond" << 514));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -105,11 +100,10 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it becomes a constant if both isoWeekYear, isoWeek and isoDayOfWeek are provided,
// and are both expressions which evaluate to constants.
- spec = BSON("$dateFromParts" << BSON("isoWeekYear" << BSON("$add" << BSON_ARRAY(1017 << 1000))
- << "isoWeek"
- << BSON("$add" << BSON_ARRAY(20 << 6))
- << "isoDayOfWeek"
- << BSON("$add" << BSON_ARRAY(3 << 2))));
+ spec = BSON("$dateFromParts" << BSON("isoWeekYear"
+ << BSON("$add" << BSON_ARRAY(1017 << 1000)) << "isoWeek"
+ << BSON("$add" << BSON_ARRAY(20 << 6)) << "isoDayOfWeek"
+ << BSON("$add" << BSON_ARRAY(3 << 2))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -117,8 +111,7 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// year is not a constant.
spec = BSON("$dateFromParts" << BSON("year"
<< "$year"
- << "month"
- << 6));
+ << "month" << 6));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -180,8 +173,7 @@ TEST_F(ExpressionDateToPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec = BSON("$dateToParts" << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "iso8601"
- << false));
+ << "iso8601" << false));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateToParts",
@@ -224,8 +216,7 @@ TEST_F(ExpressionDateToPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it becomes a constant if both date and iso8601 are provided, and are both
// expressions which evaluate to constants.
spec = BSON("$dateToParts" << BSON("date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
- << "iso8601"
- << BSON("$not" << false)));
+ << "iso8601" << BSON("$not" << false)));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -305,8 +296,7 @@ TEST_F(DateExpressionTest, ParsingRejectsUnrecognizedFieldsInObjectSpecification
for (auto&& expName : dateExpressions) {
BSONObj spec = BSON(expName << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "extra"
- << 4));
+ << "extra" << 4));
ASSERT_THROWS_CODE(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
40535);
@@ -561,8 +551,7 @@ TEST_F(DateExpressionTest, DoesResultInNullIfGivenNullishInput) {
// Test that the expression results in null if the date and timezone both nullish.
spec = BSON(expName << BSON("date"
<< "$missing"
- << "timezone"
- << BSONUndefined));
+ << "timezone" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate(contextDoc, &expCtx->variables));
@@ -619,8 +608,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// missing.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -628,9 +616,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -639,8 +625,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// expressions which evaluate to constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m%d"
- << "date"
- << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
+ << "date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
<< "timezone"
<< BSON("$concat" << BSON_ARRAY("Europe"
<< "/"
@@ -652,9 +637,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// 'onNull'.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"
<< "onNull"
<< "null default"));
@@ -676,9 +659,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// timezone is not a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "$tz"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -686,9 +667,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'onNull' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "onNull"
+ << "date" << Date_t{} << "onNull"
<< "$onNull"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -696,8 +675,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'format' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "$format"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
}
@@ -707,19 +685,14 @@ TEST_F(ExpressionDateToStringTest, ReturnsOnNullValueWhenInputIsNullish) {
auto spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
+ << "date" << BSONNULL << "onNull"
<< "null default"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
- << BSONNULL));
+ << "date" << BSONNULL << "onNull" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1074,15 +1047,13 @@ TEST_F(ExpressionDateFromStringTest, RejectsNonStringFormat) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "2017-07-13T10:02:57"
- << "format"
- << 2));
+ << "format" << 2));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
spec = BSON("$dateFromString" << BSON("dateString"
<< "July 4, 2017"
- << "format"
- << true));
+ << "format" << true));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
}
@@ -1126,8 +1097,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1140,8 +1110,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONUndefined));
+ << "format" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1265,8 +1234,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1285,8 +1253,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnError) {
<< "Invalid dateString"
<< "onError"
<< "Not used default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1303,8 +1270,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1323,8 +1289,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnError)
<< "Invalid dateString"
<< "onError"
<< "On error default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1341,15 +1306,13 @@ TEST_F(ExpressionDateFromStringTest, OnNullTakesPrecedenceOverOtherNullishParame
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1361,8 +1324,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1370,8 +1332,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1406,10 +1367,10 @@ TEST_F(ExpressionDateFromStringTest, ReturnsOnErrorForFormatMismatch) {
TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onNull"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onNull" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
@@ -1420,10 +1381,10 @@ TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
TEST_F(ExpressionDateFromStringTest, OnErrorEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onError"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onError" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index 581e6963e1e..a95cf3ff303 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -47,13 +47,13 @@ namespace ExpressionTests {
using boost::intrusive_ptr;
using std::initializer_list;
+using std::list;
using std::numeric_limits;
using std::pair;
using std::set;
using std::sort;
using std::string;
using std::vector;
-using std::list;
/**
* Creates an expression given by 'expressionName' and evaluates it using
@@ -590,8 +590,8 @@ TEST_F(ExpressionNaryTest, FlattenInnerOperandsOptimizationOnAssociativeOnlyMidd
intrusive_ptr<Expression> optimized = _associativeOnly->optimize();
ASSERT(_associativeOnly == optimized);
- BSONArray expectedContent = BSON_ARRAY(
- 200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1" << BSON_ARRAY(101 << 99) << "$path2");
+ BSONArray expectedContent = BSON_ARRAY(200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1"
+ << BSON_ARRAY(101 << 99) << "$path2");
assertContents(_associativeOnly, expectedContent);
}
@@ -737,12 +737,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatSimple) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "key1"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "key2"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("key1" << 2 << "key2" << 3))}}});
}
@@ -750,12 +748,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatWithDuplicates) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "hi"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "hi"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("hi" << 3))}}});
}
@@ -1888,8 +1884,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$and" << BSON_ARRAY("$a"
@@ -1901,8 +1896,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$const" << false);
@@ -3261,8 +3255,7 @@ TEST(ExpressionObjectParse, ShouldAcceptLiteralsAsValues) {
auto object = ExpressionObject::parse(expCtx,
BSON("a" << 5 << "b"
<< "string"
- << "c"
- << BSONNULL),
+ << "c" << BSONNULL),
vps);
auto expectedResult =
Value(Document{{"a", literal(5)}, {"b", literal("string"_sd)}, {"c", literal(BSONNULL)}});
@@ -3386,10 +3379,10 @@ auto expressionObjectCreateHelper(
expressionsWithChildrenInPlace) {
std::vector<boost::intrusive_ptr<Expression>> children;
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
- for (auto & [ unused, expression ] : expressionsWithChildrenInPlace)
+ for (auto& [unused, expression] : expressionsWithChildrenInPlace)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : expressionsWithChildrenInPlace) {
+ for (auto& [fieldName, unused] : expressionsWithChildrenInPlace) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -3840,8 +3833,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$const" << true);
@@ -3852,8 +3844,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$or" << BSON_ARRAY("$a"
@@ -4174,12 +4165,9 @@ class Same : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4187,12 +4175,9 @@ class Redundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4201,11 +4186,8 @@ class DoubleRedundant : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4213,12 +4195,9 @@ class Super : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4226,12 +4205,9 @@ class SuperWithRedundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4239,12 +4215,9 @@ class Sub : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4252,12 +4225,9 @@ class SameBackwards : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4265,12 +4235,9 @@ class NoOverlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << vector<Value>()
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1 << 2)));
+ << "$setIntersection" << vector<Value>()
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1 << 2)));
}
};
@@ -4278,12 +4245,9 @@ class Overlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1)));
+ << "$setIntersection" << DOC_ARRAY(2)
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1)));
}
};
@@ -4291,8 +4255,7 @@ class LastNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4303,8 +4266,7 @@ class FirstNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4351,12 +4313,8 @@ class LeftArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << true
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
+ << "$setEquals" << false << "$setDifference"
<< vector<Value>()));
}
};
@@ -4365,45 +4323,39 @@ class RightArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << false
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
+ << "$setEquals" << false << "$setDifference"
<< DOC_ARRAY(1 << 2)));
}
};
class ManyArgs : public ExpectedResultBase {
Document getSpec() {
- return DOC(
- "input" << DOC_ARRAY(DOC_ARRAY(8 << 3) << DOC_ARRAY("asdf"_sd
- << "foo"_sd)
- << DOC_ARRAY(80.3 << 34)
- << vector<Value>()
- << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
- << "expected"
- << DOC("$setIntersection" << vector<Value>() << "$setEquals" << false
- << "$setUnion"
- << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
- << "foo"_sd
- << "yay"_sd))
- << "error"
- << DOC_ARRAY("$setIsSubset"_sd
- << "$setDifference"_sd));
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
+ << DOC_ARRAY("asdf"_sd
+ << "foo"_sd)
+ << DOC_ARRAY(80.3 << 34) << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
+ << "expected"
+ << DOC("$setIntersection"
+ << vector<Value>() << "$setEquals" << false << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
+ << "foo"_sd
+ << "yay"_sd))
+ << "error"
+ << DOC_ARRAY("$setIsSubset"_sd
+ << "$setDifference"_sd));
}
};
class ManyArgsEqual : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4) << DOC_ARRAY(1 << 2 << 2 << 4)
- << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4))
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
+ << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4))
<< "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
- << true
- << "$setUnion"
+ << true << "$setUnion"
<< DOC_ARRAY(1 << 2 << 4))
<< "error"
<< DOC_ARRAY("$setIsSubset"_sd
@@ -4690,7 +4642,7 @@ TEST(ExpressionSubstrTest, ThrowsWithNegativeStart) {
ASSERT_THROWS([&] { expr->evaluate({}, &expCtx->variables); }(), AssertionException);
}
-} // namespace Substr
+} // namespace SubstrBytes
namespace SubstrCP {
@@ -4805,8 +4757,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
ASSERT_THROWS(Expression::parseExpression(expCtx,
BSON("$ltrim" << BSON("chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
ASSERT_THROWS(Expression::parseExpression(expCtx,
@@ -4814,8 +4765,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
<< "$x"
<< "chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
}
@@ -5333,8 +5283,7 @@ TEST(ExpressionTrimTest, DoesOptimizeToConstantWithCustomChars) {
expCtx,
BSON("$trim" << BSON("input"
<< " abc "
- << "chars"
- << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
+ << "chars" << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
expCtx->variablesParseState);
optimized = trim->optimize();
constant = dynamic_cast<ExpressionConstant*>(optimized.get());
@@ -5879,8 +5828,9 @@ class FalseViaInt : public ExpectedResultBase {
class Null : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(BSONNULL) << "error" << DOC_ARRAY("$allElementsTrue"_sd
- << "$anyElementTrue"_sd));
+ return DOC("input" << DOC_ARRAY(BSONNULL) << "error"
+ << DOC_ARRAY("$allElementsTrue"_sd
+ << "$anyElementTrue"_sd));
}
};
@@ -6465,5 +6415,5 @@ TEST(NowAndClusterTime, BasicTest) {
ASSERT_VALUE_EQ(result, Value{true});
}
}
-}
+} // namespace NowAndClusterTime
} // namespace ExpressionTests
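The expression_test.cpp hunks above are pure reflow: chained << operands that previously sat one per line are now packed several to a line, up to the column limit. A minimal standalone sketch of the same layout rule, using std::ostringstream since the DOC()/DOC_ARRAY() macros are internal to these tests:

    #include <iostream>
    #include <sstream>

    int main() {
        std::ostringstream expected;
        // Old layout: one operand per line. New layout: operands share a
        // line until the column limit is reached.
        expected << "$setIsSubset" << ": " << false << ", "
                 << "$setEquals" << ": " << false << ", "
                 << "$setIntersection" << ": " << 1;
        std::cout << expected.str() << '\n';
        return 0;
    }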
diff --git a/src/mongo/db/pipeline/expression_trigonometric.h b/src/mongo/db/pipeline/expression_trigonometric.h
index 41f10ca2e29..cc8ca852f8b 100644
--- a/src/mongo/db/pipeline/expression_trigonometric.h
+++ b/src/mongo/db/pipeline/expression_trigonometric.h
@@ -135,12 +135,8 @@ public:
void assertBounds(T input) const {
uassert(50989,
str::stream() << "cannot apply " << getOpName() << " to " << toString(input)
-                            << ", value must be in "
- << BoundType::leftBracket()
- << getLowerBound()
- << ","
- << getUpperBound()
- << BoundType::rightBracket(),
+                            << ", value must be in " << BoundType::leftBracket() << getLowerBound()
+ << "," << getUpperBound() << BoundType::rightBracket(),
checkBounds(input));
}
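The assertBounds() hunk keeps the message content and only repacks the stream. A hedged stand-in with standard C++ in place of uassert() and str::stream() (assumed shapes, illustration only):

    #include <sstream>
    #include <stdexcept>

    // Illustrative bounds check; the real code reports the op name and
    // bracket style via getOpName()/BoundType, which are omitted here.
    void assertBounds(double input, double lower, double upper) {
        if (!(input >= lower && input <= upper)) {
            std::ostringstream msg;
            msg << "cannot apply $acos to " << input << ", value must be in ["
                << lower << "," << upper << "]";
            throw std::domain_error(msg.str());
        }
    }

    int main() {
        assertBounds(0.5, -1.0, 1.0);  // in range, so no throw
        return 0;
    }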
diff --git a/src/mongo/db/pipeline/expression_trigonometric_test.cpp b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
index 49ea60e1f9b..b9356e60bae 100644
--- a/src/mongo/db/pipeline/expression_trigonometric_test.cpp
+++ b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
@@ -1403,4 +1403,4 @@ TEST(ExpressionDegreesToRadiansTest, DecimalArg) {
TEST(ExpressionDegreesToRadiansTest, NullArg) {
assertEvaluates("$degreesToRadians", Value(BSONNULL), Value(BSONNULL));
}
-} // namespace expression_trigonometric_test
+} // namespace expression_tests
diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp
index bb26fc478ca..4c9e23a86df 100644
--- a/src/mongo/db/pipeline/field_path.cpp
+++ b/src/mongo/db/pipeline/field_path.cpp
@@ -81,4 +81,4 @@ void FieldPath::uassertValidFieldName(StringData fieldName) {
uassert(
16412, "FieldPath field names may not contain '.'.", fieldName.find('.') == string::npos);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h
index 347b236fb6b..bbc775be9db 100644
--- a/src/mongo/db/pipeline/field_path.h
+++ b/src/mongo/db/pipeline/field_path.h
@@ -136,4 +136,4 @@ inline bool operator<(const FieldPath& lhs, const FieldPath& rhs) {
inline bool operator==(const FieldPath& lhs, const FieldPath& rhs) {
return lhs.fullPath() == rhs.fullPath();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
index 6db3d45ea78..56164da1b21 100644
--- a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
+++ b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
@@ -106,13 +106,9 @@ void testRoundingUpInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -140,15 +136,12 @@ void testRoundingUpInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -175,15 +168,11 @@ void testRoundingUpBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -212,14 +201,10 @@ void testRoundingUpBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> rounde
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding up the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -244,13 +229,9 @@ void testRoundingDownInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -277,15 +258,12 @@ void testRoundingDownInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder)
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -312,15 +290,11 @@ void testRoundingDownBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -349,14 +323,10 @@ void testRoundingDownBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> roun
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding down the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -383,13 +353,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
@@ -400,13 +366,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
multiplier *= 10.0;
@@ -430,12 +392,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
@@ -449,12 +408,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
multiplier.multiply(Decimal128(10));
diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.cpp b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
index 87aebb72238..28b5b133a65 100644
--- a/src/mongo/db/pipeline/lite_parsed_document_source.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
@@ -61,4 +61,4 @@ std::unique_ptr<LiteParsedDocumentSource> LiteParsedDocumentSource::parse(
return it->second(request, specElem);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
index b1802c91970..81a10467a58 100644
--- a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
@@ -54,8 +54,7 @@ void LiteParsedPipeline::assertSupportsReadConcern(
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Explain for the aggregate command cannot run with a readConcern "
<< "other than 'local', or in a multi-document transaction. Current "
- << "readConcern: "
- << readConcern.toString(),
+ << "readConcern: " << readConcern.toString(),
!explain || readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
for (auto&& spec : _stageSpecs) {
diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h
index 0a68a65d086..69f897f5583 100644
--- a/src/mongo/db/pipeline/lookup_set_cache.h
+++ b/src/mongo/db/pipeline/lookup_set_cache.h
@@ -47,10 +47,10 @@
namespace mongo {
using boost::multi_index_container;
-using boost::multi_index::sequenced;
using boost::multi_index::hashed_unique;
-using boost::multi_index::member;
using boost::multi_index::indexed_by;
+using boost::multi_index::member;
+using boost::multi_index::sequenced;
/**
* A least-recently-used cache from key to a vector of values. It does not implement any default
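The lookup_set_cache.h hunk only alphabetizes using-declarations. The same ordering, sketched with standard-library names rather than the Boost.MultiIndex ones:

    #include <string>
    #include <utility>
    #include <vector>

    // Sorted alphabetically by qualified name, matching the reordering of
    // the boost::multi_index using-declarations above.
    using std::pair;
    using std::string;
    using std::vector;

    int main() {
        vector<pair<string, int>> entries{{"key", 1}};
        return entries.size() == 1 ? 0 : 1;
    }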
diff --git a/src/mongo/db/pipeline/mongos_process_interface.cpp b/src/mongo/db/pipeline/mongos_process_interface.cpp
index 331b02bcbcf..6cc2a11d8eb 100644
--- a/src/mongo/db/pipeline/mongos_process_interface.cpp
+++ b/src/mongo/db/pipeline/mongos_process_interface.cpp
@@ -227,15 +227,12 @@ boost::optional<Document> MongoSInterface::lookupSingleDocument(
uassert(ErrorCodes::InternalError,
str::stream() << "Shard cursor was unexpectedly open after lookup: "
<< shardResult.front().getHostAndPort()
- << ", id: "
- << cursor.getCursorId(),
+ << ", id: " << cursor.getCursorId(),
cursor.getCursorId() == 0);
uassert(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching " << filter.toString() << " ["
- << batch.begin()->toString()
- << ", "
- << std::next(batch.begin())->toString()
- << "]",
+ << batch.begin()->toString() << ", "
+ << std::next(batch.begin())->toString() << "]",
batch.size() <= 1u);
return (!batch.empty() ? Document(batch.front()) : boost::optional<Document>{});
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
index 3f283079ac4..42a76ef94e5 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
@@ -86,11 +86,7 @@ void ProjectionSpecValidator::ensurePathDoesNotConflictOrThrow(const std::string
uassert(40176,
str::stream() << "specification contains two conflicting paths. "
"Cannot specify both '"
- << path
- << "' and '"
- << *conflictingPath
- << "': "
- << _rawObj.toString(),
+ << path << "' and '" << *conflictingPath << "': " << _rawObj.toString(),
!conflictingPath);
}
@@ -129,10 +125,8 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
uasserted(40181,
str::stream() << "an expression specification must contain exactly "
"one field, the name of the expression. Found "
- << thisLevelSpec.nFields()
- << " fields in "
- << thisLevelSpec.toString()
- << ", while parsing object "
+ << thisLevelSpec.nFields() << " fields in "
+ << thisLevelSpec.toString() << ", while parsing object "
<< _rawObj.toString());
}
ensurePathDoesNotConflictOrThrow(prefix.fullPath());
@@ -141,8 +135,7 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
if (fieldName.find('.') != std::string::npos) {
uasserted(40183,
str::stream() << "cannot use dotted field name '" << fieldName
- << "' in a sub object: "
- << _rawObj.toString());
+ << "' in a sub object: " << _rawObj.toString());
}
parseElement(elem, FieldPath::getFullyQualifiedPath(prefix.fullPath(), fieldName));
}
@@ -245,23 +238,25 @@ private:
} else if ((elem.isBoolean() || elem.isNumber()) && !elem.trueValue()) {
// If this is an excluded field other than '_id', ensure that the projection type has
// not already been set to kInclusionProjection.
- uassert(40178,
- str::stream() << "Bad projection specification, cannot exclude fields "
- "other than '_id' in an inclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kExclusionProjection));
+ uassert(
+ 40178,
+ str::stream() << "Bad projection specification, cannot exclude fields "
+ "other than '_id' in an inclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kExclusionProjection));
_parsedType = TransformerInterface::TransformerType::kExclusionProjection;
} else {
// A boolean true, a truthy numeric value, or any expression can only be used with an
// inclusion projection. Note that literal values like "string" or null are also treated
// as expressions.
- uassert(40179,
- str::stream() << "Bad projection specification, cannot include fields or "
- "add computed fields during an exclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kInclusionProjection));
+ uassert(
+ 40179,
+ str::stream() << "Bad projection specification, cannot include fields or "
+ "add computed fields during an exclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kInclusionProjection));
_parsedType = TransformerInterface::TransformerType::kInclusionProjection;
}
}
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
index 15efa442726..27ce39b9c86 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
@@ -149,15 +149,13 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
// Then assert that we throw when we introduce a prefixed field.
ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true << "a.b c" << true << "a.b.d"
- << true)),
- AssertionException);
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(BSON(
- "a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b" << false << "a.b-c"
- << false)),
+ makeProjectionWithDefaultPolicies(BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(BSON("a.b.d" << false << "a.b c" << false
+ << "a.b?c" << false << "a.b"
+ << false << "a.b-c" << false)),
+ AssertionException);
// Adding the same field twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
@@ -168,34 +166,24 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
AssertionException);
// Mix of include/exclude and adding a shared prefix.
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true << "a.b c" << true
- << "a.b.d"
- << true)),
- AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(
+ BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
+ AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
- << false)),
+ << wrapInLiteral(0) << "a.b-c" << false)),
AssertionException);
// Adding a shared prefix twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b-c" << wrapInLiteral(1) << "a.b" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b c"
- << wrapInLiteral(1)
- << "a.b.d"
+ << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b.d"
<< wrapInLiteral(0))),
AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
+ << wrapInLiteral(1) << "a.b" << wrapInLiteral(0) << "a.b-c"
<< wrapInLiteral(1))),
AssertionException);
}
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
index ca85af5fae1..df71508b41e 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
@@ -240,10 +240,8 @@ TEST(InclusionProjectionExecutionTest, ShouldOptimizeNestedExpressions) {
TEST(InclusionProjectionExecutionTest, ShouldReportThatAllExceptIncludedFieldsAreModified) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
- inclusion.parse(BSON(
- "a" << wrapInLiteral("computedVal") << "b.c" << wrapInLiteral("computedVal") << "d" << true
- << "e.f"
- << true));
+ inclusion.parse(BSON("a" << wrapInLiteral("computedVal") << "b.c"
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -261,11 +259,7 @@ TEST(InclusionProjectionExecutionTest,
ShouldReportThatAllExceptIncludedFieldsAreModifiedWithIdExclusion) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
inclusion.parse(BSON("_id" << false << "a" << wrapInLiteral("computedVal") << "b.c"
- << wrapInLiteral("computedVal")
- << "d"
- << true
- << "e.f"
- << true));
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -573,11 +567,10 @@ TEST(InclusionProjectionExecutionTest, ShouldAllowMixedNestedAndDottedFields) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
// Include all of "a.b", "a.c", "a.d", and "a.e".
// Add new computed fields "a.W", "a.X", "a.Y", and "a.Z".
- inclusion.parse(BSON(
- "a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X" << wrapInLiteral("X")
- << "a"
- << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y") << "Z"
- << wrapInLiteral("Z"))));
+ inclusion.parse(BSON("a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X"
+ << wrapInLiteral("X") << "a"
+ << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y")
+ << "Z" << wrapInLiteral("Z"))));
auto result = inclusion.applyProjection(Document{
{"a",
Document{{"b", "b"_sd}, {"c", "c"_sd}, {"d", "d"_sd}, {"e", "e"_sd}, {"f", "f"_sd}}}});
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 439d4d9ab88..1ce178126dd 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -169,9 +169,9 @@ void Pipeline::validateTopLevelPipeline() const {
if (nss.isCollectionlessAggregateNS() &&
!firstStageConstraints.isIndependentOfAnyCollection) {
uasserted(ErrorCodes::InvalidNamespace,
- str::stream() << "{aggregate: 1} is not valid for '"
- << _sources.front()->getSourceName()
- << "'; a collection is required.");
+ str::stream()
+ << "{aggregate: 1} is not valid for '"
+ << _sources.front()->getSourceName() << "'; a collection is required.");
}
if (!nss.isCollectionlessAggregateNS() &&
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 178426d0c94..96047148104 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -516,9 +516,9 @@ PipelineD::buildInnerQueryExecutorGeneric(Collection* collection,
(pipeline->peekFront() && pipeline->peekFront()->constraints().isChangeStreamStage());
auto attachExecutorCallback = [deps, queryObj, sortObj, projForQuery, trackOplogTS](
- Collection* collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline* pipeline) {
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceCursor::create(
collection, std::move(exec), pipeline->getContext(), trackOplogTS);
addCursorSource(
@@ -575,15 +575,14 @@ PipelineD::buildInnerQueryExecutorGeoNear(Collection* collection,
str::stream() << "Unexpectedly got the following sort from the query system: "
<< sortFromQuerySystem.jsonString());
- auto attachExecutorCallback =
- [
- deps,
- distanceField = geoNearStage->getDistanceField(),
- locationField = geoNearStage->getLocationField(),
- distanceMultiplier = geoNearStage->getDistanceMultiplier().value_or(1.0)
- ](Collection * collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline * pipeline) {
+ auto attachExecutorCallback = [deps,
+ distanceField = geoNearStage->getDistanceField(),
+ locationField = geoNearStage->getLocationField(),
+ distanceMultiplier =
+ geoNearStage->getDistanceMultiplier().value_or(1.0)](
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceGeoNearCursor::create(collection,
std::move(exec),
pipeline->getContext(),
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree.h b/src/mongo/db/pipeline/pipeline_metadata_tree.h
index fe8c1f02770..1a22c452590 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree.h
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree.h
@@ -117,8 +117,7 @@ inline auto findStageContents(const NamespaceString& ns,
auto it = initialStageContents.find(ns);
uassert(51213,
str::stream() << "Metadata to initialize an aggregation pipeline associated with "
- << ns.coll()
- << " is missing.",
+ << ns.coll() << " is missing.",
it != initialStageContents.end());
return it->second;
}
@@ -154,7 +153,7 @@ inline auto makeAdditionalChildren(
std::vector<T> offTheEndContents;
if (auto lookupSource = dynamic_cast<const DocumentSourceLookUp*>(&source);
lookupSource && lookupSource->wasConstructedWithPipelineSyntax()) {
- auto[child, offTheEndReshaper] =
+ auto [child, offTheEndReshaper] =
makeTreeWithOffTheEndStage(std::move(initialStageContents),
lookupSource->getResolvedIntrospectionPipeline(),
propagator);
@@ -166,7 +165,7 @@ inline auto makeAdditionalChildren(
facetSource->getFacetPipelines().end(),
std::back_inserter(children),
[&](const auto& fPipe) {
- auto[child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
+ auto [child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
std::move(initialStageContents), *fPipe.pipeline, propagator);
offTheEndContents.push_back(offTheEndReshaper(child.get().contents));
return std::move(*child);
@@ -192,13 +191,15 @@ inline auto makeStage(
auto contents = (previous) ? reshapeContents(previous.get().contents)
: findStageContents(source.getContext()->ns, initialStageContents);
- auto[additionalChildren, offTheEndContents] =
+ auto [additionalChildren, offTheEndContents] =
makeAdditionalChildren(std::move(initialStageContents), source, propagator, contents);
auto principalChild = previous ? std::make_unique<Stage<T>>(std::move(previous.get()))
: std::unique_ptr<Stage<T>>();
- std::function<T(const T&)> reshaper([&, offTheEndContents{std::move(offTheEndContents)} ](
- const T& reshapable) { return propagator(reshapable, offTheEndContents, source); });
+ std::function<T(const T&)> reshaper(
+ [&, offTheEndContents{std::move(offTheEndContents)}](const T& reshapable) {
+ return propagator(reshapable, offTheEndContents, source);
+ });
return std::pair(
boost::optional<Stage<T>>(
Stage(std::move(contents), std::move(principalChild), std::move(additionalChildren))),
@@ -278,7 +279,7 @@ inline std::pair<boost::optional<Stage<T>>, T> makeTree(
findStageContents(pipeline.getContext()->ns, initialStageContents));
}
- auto && [ finalStage, reshaper ] =
+ auto&& [finalStage, reshaper] =
detail::makeTreeWithOffTheEndStage(std::move(initialStageContents), pipeline, propagator);
return std::pair(std::move(*finalStage), reshaper(finalStage.get().contents));
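pipeline_metadata_tree.h shows the new structured-binding spelling: no space between auto and &&, none inside the bracket list. Both accepted forms, in a compilable sketch:

    #include <utility>

    // Stand-in for makeTreeWithOffTheEndStage(); names are hypothetical.
    std::pair<int, int> makeTreeStub() {
        return {1, 2};
    }

    int main() {
        auto [child, offTheEndReshaper] = makeTreeStub();  // was: auto[child, ...]
        auto&& [finalStage, reshaper] = makeTreeStub();    // was: auto && [ ... ]
        return child + offTheEndReshaper + finalStage + reshaper == 6 ? 0 : 1;
    }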
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
index 25a161c2048..5a15074b361 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
@@ -129,7 +129,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
auto pipePtr = jsonToPipeline("[{$project: {name: 1}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{23}, {}, {}));
+ }()
+ .first.get() == Stage(TestThing{23}, {}, {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -137,7 +138,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
+ }()
+ .first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -149,7 +151,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{28},
makeUniqueStage(
TestThing{27},
@@ -247,7 +250,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
{NamespaceString("test.instruments"), {"2"}}},
*pipePtr,
buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"1mpxul[2m]ulu"},
makeUniqueStage(
TestThing{"1mpxul[2m]ul"},
@@ -283,7 +287,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
"{$limit: 12}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), {""}}}, *pipePtr, buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"f[tugs, tmgs, tb]"},
makeUniqueStage(
TestThing{""},
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index d7a51028503..225303b9999 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -187,7 +187,7 @@ Update MongoInterfaceStandalone::buildUpdateOp(
for (auto&& obj : batch) {
updateEntries.push_back([&] {
UpdateOpEntry entry;
- auto && [ q, u, c ] = obj;
+ auto&& [q, u, c] = obj;
entry.setQ(std::move(q));
entry.setU(std::move(u));
entry.setC(std::move(c));
@@ -306,8 +306,7 @@ void MongoInterfaceStandalone::renameIfOptionsAndIndexesHaveNotChanged(
str::stream() << "collection options of target collection " << targetNs.ns()
<< " changed during processing. Original options: "
<< originalCollectionOptions
- << ", new options: "
- << getCollectionOptions(targetNs),
+ << ", new options: " << getCollectionOptions(targetNs),
SimpleBSONObjComparator::kInstance.evaluate(originalCollectionOptions ==
getCollectionOptions(targetNs)));
@@ -432,12 +431,8 @@ boost::optional<Document> MongoInterfaceStandalone::lookupSingleDocument(
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document with document key "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
// Set the speculative read timestamp appropriately after we do a document lookup locally. We
@@ -581,14 +576,12 @@ void MongoInterfaceStandalone::_reportCurrentOpsForIdleSessions(OperationContext
? makeSessionFilterForAuthenticatedUsers(opCtx)
: KillAllSessionsByPatternSet{{}});
- sessionCatalog->scanSessions(
- {std::move(sessionFilter)},
- [&](const ObservableSession& session) {
- auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
- if (!op.isEmpty()) {
- ops->emplace_back(op);
- }
- });
+ sessionCatalog->scanSessions({std::move(sessionFilter)}, [&](const ObservableSession& session) {
+ auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
+ if (!op.isEmpty()) {
+ ops->emplace_back(op);
+ }
+ });
}
std::unique_ptr<CollatorInterface> MongoInterfaceStandalone::_getCollectionDefaultCollator(
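The scanSessions() hunk shows the trailing-lambda rule: when the last argument is a lambda, the call header stays on one line and the body hangs beneath it. A stand-in with a plain callback (names here are hypothetical):

    #include <functional>
    #include <iostream>
    #include <vector>

    void scanSessions(const std::vector<int>& sessions,
                      const std::function<void(int)>& visit) {
        for (int s : sessions)
            visit(s);
    }

    int main() {
        std::vector<int> sessions{1, 2, 3};
        scanSessions(sessions, [&](int session) {  // lambda hangs off the call
            std::cout << session << '\n';
        });
        return 0;
    }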
diff --git a/src/mongo/db/pipeline/process_interface_standalone_test.cpp b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
index fa246fc2e9d..e522111e395 100644
--- a/src/mongo/db/pipeline/process_interface_standalone_test.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
@@ -93,7 +93,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
// Test that 'targetCollectionVersion' is accepted if from mongos.
expCtx->fromMongos = true;
- auto[joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
+ auto [joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, {{"_id"}}, targetCollectionVersion, expCtx->ns);
ASSERT_EQ(joinKey.size(), 1UL);
ASSERT_EQ(joinKey.count(FieldPath("_id")), 1UL);
diff --git a/src/mongo/db/pipeline/resume_token.cpp b/src/mongo/db/pipeline/resume_token.cpp
index 02a3fdbccf3..0a9cdfe32c4 100644
--- a/src/mongo/db/pipeline/resume_token.cpp
+++ b/src/mongo/db/pipeline/resume_token.cpp
@@ -90,8 +90,9 @@ ResumeToken::ResumeToken(const Document& resumeDoc) {
_typeBits = resumeDoc[kTypeBitsFieldName];
uassert(40648,
str::stream() << "Bad resume token: _typeBits of wrong type " << resumeDoc.toString(),
- _typeBits.missing() || (_typeBits.getType() == BSONType::BinData &&
- _typeBits.getBinData().type == BinDataGeneral));
+ _typeBits.missing() ||
+ (_typeBits.getType() == BSONType::BinData &&
+ _typeBits.getBinData().type == BinDataGeneral));
}
// We encode the resume token as a KeyString with the sequence:
diff --git a/src/mongo/db/pipeline/resume_token_test.cpp b/src/mongo/db/pipeline/resume_token_test.cpp
index 72894880953..d684e30cc26 100644
--- a/src/mongo/db/pipeline/resume_token_test.cpp
+++ b/src/mongo/db/pipeline/resume_token_test.cpp
@@ -360,5 +360,5 @@ TEST(ResumeToken, StringEncodingSortsCorrectly) {
{ts10_4, 0, 0, lower_uuid, Value(Document{{"_id", 0}})});
}
-} // namspace
-} // namspace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 9c84317c8d9..1c32c251ff5 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -112,8 +112,7 @@ BSONObj genericTransformForShards(MutableDocument&& cmdForShards,
invariant(cmdForShards.peek()[OperationSessionInfo::kTxnNumberFieldName].missing(),
str::stream() << "Command for shards unexpectedly had the "
<< OperationSessionInfo::kTxnNumberFieldName
- << " field set: "
- << cmdForShards.peek().toString());
+ << " field set: " << cmdForShards.peek().toString());
cmdForShards[OperationSessionInfo::kTxnNumberFieldName] =
Value(static_cast<long long>(*opCtx->getTxnNumber()));
}
@@ -336,9 +335,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
shardQuery);
invariant(cursors.size() % shardIds.size() == 0,
str::stream() << "Number of cursors (" << cursors.size()
- << ") is not a multiple of producers ("
- << shardIds.size()
- << ")");
+ << ") is not a multiple of producers (" << shardIds.size() << ")");
}
// Convert remote cursors into a vector of "owned" cursors.
@@ -350,9 +347,9 @@ DispatchShardPipelineResults dispatchShardPipeline(
// Record the number of shards involved in the aggregation. If we are required to merge on
// the primary shard, but the primary shard was not in the set of targeted shards, then we
// must increment the number of involved shards.
- CurOp::get(opCtx)->debug().nShards =
- shardIds.size() + (needsPrimaryShardMerge && executionNsRoutingInfo &&
- !shardIds.count(executionNsRoutingInfo->db().primaryId()));
+ CurOp::get(opCtx)->debug().nShards = shardIds.size() +
+ (needsPrimaryShardMerge && executionNsRoutingInfo &&
+ !shardIds.count(executionNsRoutingInfo->db().primaryId()));
return DispatchShardPipelineResults{needsPrimaryShardMerge,
std::move(ownedCursors),
diff --git a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
index a5b877a9e49..506acd514e8 100644
--- a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
+++ b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
@@ -93,12 +93,8 @@ boost::optional<Document> StubMongoProcessInterfaceLookupSingleDocument::lookupS
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
return lookedUpDocument;
}
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 52a1c5fd71d..b804adaf797 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -389,8 +389,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
if (getType() == BSONType::Object) {
@@ -411,8 +410,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
void Value::addToBsonArray(BSONArrayBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
// If this Value is empty, do nothing to avoid incrementing the builder's counter.
@@ -704,7 +702,7 @@ int Value::compare(const Value& rL,
case Date: // signed
return cmp(rL._storage.dateValue, rR._storage.dateValue);
- // Numbers should compare by equivalence even if different types
+ // Numbers should compare by equivalence even if different types
case NumberDecimal: {
switch (rType) {
@@ -1078,9 +1076,9 @@ size_t Value::getApproximateSize() const {
case Symbol:
case BinData:
case String:
- return sizeof(Value) + (_storage.shortStr
- ? 0 // string stored inline, so no extra mem usage
- : sizeof(RCString) + _storage.getString().size());
+ return sizeof(Value) +
+ (_storage.shortStr ? 0 // string stored inline, so no extra mem usage
+ : sizeof(RCString) + _storage.getString().size());
case Object:
return sizeof(Value) + getDocument().getApproximateSize();
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index ef0ac8b6afd..296d6d08480 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -146,7 +146,7 @@ public:
     * Used when performing arithmetic operations with int where the
* result may be too large and need to be stored as long. The Value
* will be an int if value fits, otherwise it will be a long.
- */
+ */
static Value createIntOrLong(long long value);
/** A "missing" value indicates the lack of a Value.
@@ -396,7 +396,7 @@ public:
return Value(values);
}
};
-}
+} // namespace mongo
/* ======================= INLINED IMPLEMENTATIONS ========================== */
diff --git a/src/mongo/db/pipeline/variables.cpp b/src/mongo/db/pipeline/variables.cpp
index cf6b81e9605..8a37fecc10f 100644
--- a/src/mongo/db/pipeline/variables.cpp
+++ b/src/mongo/db/pipeline/variables.cpp
@@ -68,9 +68,7 @@ void Variables::uassertValidNameForUserWrite(StringData varName) {
uassert(16868,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -95,9 +93,7 @@ void Variables::uassertValidNameForUserRead(StringData varName) {
uassert(16871,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -258,4 +254,4 @@ std::set<Variables::Id> VariablesParseState::getDefinedVariableIDs() const {
return ids;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/variables.h b/src/mongo/db/pipeline/variables.h
index 9627659b25b..b4826f1f460 100644
--- a/src/mongo/db/pipeline/variables.h
+++ b/src/mongo/db/pipeline/variables.h
@@ -156,7 +156,7 @@ private:
void setValue(Id id, const Value& value, bool isConstant);
static auto getBuiltinVariableName(Variables::Id variable) {
- for (auto & [ name, id ] : kBuiltinVarNameToId) {
+ for (auto& [name, id] : kBuiltinVarNameToId) {
if (variable == id) {
return name;
}
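variables.h applies the binding cleanup inside a range-for. A stand-in for kBuiltinVarNameToId (hypothetical container; the real one is MongoDB-internal):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        const std::map<std::string, int> kBuiltinVarNameToId{{"NOW", 0}, {"ROOT", 1}};
        for (auto& [name, id] : kBuiltinVarNameToId) {  // was: auto & [ name, id ]
            std::cout << name << " => " << id << '\n';
        }
        return 0;
    }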
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 91982bff80a..6698e56766c 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -427,10 +427,10 @@ void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) {
}
/**
-* Encodes sort order into cache key.
-* Sort order is normalized because it is provided by
-* QueryRequest.
-*/
+ * Encodes sort order into cache key.
+ * Sort order is normalized because it is provided by
+ * QueryRequest.
+ */
void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
if (sortObj.isEmpty()) {
return;
@@ -463,12 +463,12 @@ void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
}
/**
-* Encodes parsed projection into cache key.
-* Does a simple toString() on each projected field
-* in the BSON object.
-* Orders the encoded elements in the projection by field name.
-* This handles all the special projection types ($meta, $elemMatch, etc.)
-*/
+ * Encodes parsed projection into cache key.
+ * Does a simple toString() on each projected field
+ * in the BSON object.
+ * Orders the encoded elements in the projection by field name.
+ * This handles all the special projection types ($meta, $elemMatch, etc.)
+ */
void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) {
// Sorts the BSON elements by field name using a map.
std::map<StringData, BSONElement> elements;
diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h
index d0019ba08c9..73c0eff5fa7 100644
--- a/src/mongo/db/query/canonical_query_encoder.h
+++ b/src/mongo/db/query/canonical_query_encoder.h
@@ -45,5 +45,5 @@ CanonicalQuery::QueryShapeString encode(const CanonicalQuery& cq);
* Returns a hash of the given key (produced from either a QueryShapeString or a PlanCacheKey).
*/
uint32_t computeHash(StringData key);
-}
-}
+} // namespace canonical_query_encoder
+} // namespace mongo
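canonical_query_encoder.h ends with the commit's most common mechanical fix: bare `}` closers gain a `// namespace <name>` comment. The convention as a self-contained translation unit:

    namespace mongo {
    namespace canonical_query_encoder {

    inline int placeholder() {  // any declarations live here
        return 0;
    }

    } // namespace canonical_query_encoder
    } // namespace mongo

    int main() {
        return mongo::canonical_query_encoder::placeholder();
    }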
diff --git a/src/mongo/db/query/collation/collation_index_key.cpp b/src/mongo/db/query/collation/collation_index_key.cpp
index 3af408e8abd..a973f419f06 100644
--- a/src/mongo/db/query/collation/collation_index_key.cpp
+++ b/src/mongo/db/query/collation/collation_index_key.cpp
@@ -114,9 +114,7 @@ void translateElement(StringData fieldName,
uasserted(ErrorCodes::CannotBuildIndexKeys,
str::stream()
<< "Cannot index type Symbol with a collation. Failed to index element: "
- << element
- << ". Index collation: "
- << collator->getSpec().toBSON());
+ << element << ". Index collation: " << collator->getSpec().toBSON());
}
default:
out->appendAs(element, fieldName);
@@ -144,7 +142,7 @@ void translate(BSONObj obj, const CollatorInterface* collator, BufBuilder* out)
element.fieldNameStringData(), element, collator, &ctx.getBuilder(), &ctxStack);
}
}
-}
+} // namespace
void CollationIndexKey::collationAwareIndexKeyAppend(BSONElement elt,
const CollatorInterface* collator,
diff --git a/src/mongo/db/query/collation/collation_index_key_test.cpp b/src/mongo/db/query/collation/collation_index_key_test.cpp
index 7696561060a..20a788d7df4 100644
--- a/src/mongo/db/query/collation/collation_index_key_test.cpp
+++ b/src/mongo/db/query/collation/collation_index_key_test.cpp
@@ -171,8 +171,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendThrowsIfSymbolInsideObject) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
BSONObj dataObj = BSON("" << BSON("a"
<< "foo"
- << "b"
- << BSONSymbol("mySymbol")));
+ << "b" << BSONSymbol("mySymbol")));
BSONObjBuilder out;
ASSERT_THROWS_CODE(
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out),
diff --git a/src/mongo/db/query/collation/collation_spec_test.cpp b/src/mongo/db/query/collation/collation_spec_test.cpp
index 8036e463a54..c255476292e 100644
--- a/src/mongo/db/query/collation/collation_spec_test.cpp
+++ b/src/mongo/db/query/collation/collation_spec_test.cpp
@@ -185,23 +185,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesDefaults) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -215,23 +205,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstUpper) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "upper"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -245,23 +225,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstLower) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "lower"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -275,23 +245,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesPrimaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -305,23 +265,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesSecondaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 2
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 2 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -335,23 +285,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesQuaternaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 4
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 4 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -365,23 +305,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesIdenticalStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 5
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 5 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -395,23 +325,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesAlternateShifted) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "shifted"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -425,23 +345,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesMaxVariableSpace) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "space"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp
index c8b8de7a5ab..507ef83cd91 100644
--- a/src/mongo/db/query/collation/collator_factory_icu.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu.cpp
@@ -185,13 +185,9 @@ StatusWith<CollationSpec::CaseFirstType> stringToCaseFirstType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kCaseFirstField << "' must be '"
- << CollationSpec::kCaseFirstUpper
- << "', '"
- << CollationSpec::kCaseFirstLower
- << "', or '"
- << CollationSpec::kCaseFirstOff
- << "'. Got: "
- << caseFirst};
+ << CollationSpec::kCaseFirstUpper << "', '"
+ << CollationSpec::kCaseFirstLower << "', or '"
+ << CollationSpec::kCaseFirstOff << "'. Got: " << caseFirst};
}
}
@@ -210,8 +206,7 @@ StatusWith<CollationSpec::StrengthType> integerToStrengthType(long long strength
}
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kStrengthField
- << "' must be an integer 1 through 5. Got: "
- << strength};
+ << "' must be an integer 1 through 5. Got: " << strength};
}
StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string& alternate) {
@@ -222,11 +217,8 @@ StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kAlternateField << "' must be '"
- << CollationSpec::kAlternateNonIgnorable
- << "' or '"
- << CollationSpec::kAlternateShifted
- << "'. Got: "
- << alternate};
+ << CollationSpec::kAlternateNonIgnorable << "' or '"
+ << CollationSpec::kAlternateShifted << "'. Got: " << alternate};
}
}
@@ -238,11 +230,8 @@ StatusWith<CollationSpec::MaxVariableType> stringToMaxVariableType(const std::st
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kMaxVariableField << "' must be '"
- << CollationSpec::kMaxVariablePunct
- << "' or '"
- << CollationSpec::kMaxVariableSpace
- << "'. Got: "
- << maxVariable};
+ << CollationSpec::kMaxVariablePunct << "' or '"
+ << CollationSpec::kMaxVariableSpace << "'. Got: " << maxVariable};
}
}
@@ -272,10 +261,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseLevelField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseLevel = attributeToBool(caseLevelAttribute);
} else if (!parseStatus.isOK()) {
@@ -289,10 +276,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseLevelField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -307,10 +292,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseFirstField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseFirst = getCaseFirstFromAttribute(caseFirstAttribute);
} else if (!parseStatus.isOK()) {
@@ -332,10 +315,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseFirstField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -350,10 +331,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kStrengthField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.strength = getStrengthFromAttribute(strengthAttribute);
} else if (!parseStatus.isOK()) {
@@ -374,10 +353,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kStrengthField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -393,10 +370,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNumericOrderingField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.numericOrdering = attributeToBool(numericOrderingAttribute);
} else if (!parseStatus.isOK()) {
@@ -411,10 +386,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNumericOrderingField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -430,10 +403,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kAlternateField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.alternate = getAlternateFromAttribute(alternateAttribute);
} else if (!parseStatus.isOK()) {
@@ -455,10 +426,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kAlternateField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -485,10 +454,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kMaxVariableField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -504,10 +471,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNormalizationField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.normalization = attributeToBool(normalizationAttribute);
} else if (!parseStatus.isOK()) {
@@ -522,10 +487,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNormalizationField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -541,10 +504,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kBackwardsField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.backwards = attributeToBool(backwardsAttribute);
} else if (!parseStatus.isOK()) {
@@ -559,10 +520,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kBackwardsField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -584,9 +543,7 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
return {ErrorCodes::IncompatibleCollationVersion,
str::stream() << "Requested collation version " << specVersionStr
<< " but the only available collator version was "
- << parsedSpec.version
- << ". Requested collation spec: "
- << spec};
+ << parsedSpec.version << ". Requested collation spec: " << spec};
}
++parsedFields;
@@ -612,8 +569,7 @@ StatusWith<std::string> parseLocaleID(const BSONObj& spec) {
if (localeID.find('\0') != std::string::npos) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot contain null byte. Collation spec: "
- << spec};
+ << "' cannot contain null byte. Collation spec: " << spec};
}
return localeID;
}
@@ -629,15 +585,13 @@ Status validateLocaleID(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get locale from icu::Collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
if (originalID.empty()) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot be the empty string in: "
- << spec};
+ << "' cannot be the empty string in: " << spec};
}
// Check that each component of the locale ID is recognized by ICU. If ICU 1) cannot parse the
@@ -668,11 +622,9 @@ Status validateCollationSpec(const CollationSpec& spec) {
if (spec.backwards && spec.strength == CollationSpec::StrengthType::kPrimary) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kBackwardsField << "' is invalid with '"
- << CollationSpec::kStrengthField
- << "' of "
+ << CollationSpec::kStrengthField << "' of "
<< static_cast<int>(CollationSpec::StrengthType::kPrimary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
// The caseFirst option only affects tertiary level or caseLevel comparisons. It will have no
@@ -682,13 +634,10 @@ Status validateCollationSpec(const CollationSpec& spec) {
spec.strength == CollationSpec::StrengthType::kSecondary)) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kCaseFirstField << "' is invalid unless '"
- << CollationSpec::kCaseLevelField
- << "' is on or '"
- << CollationSpec::kStrengthField
- << "' is greater than "
+ << CollationSpec::kCaseLevelField << "' is on or '"
+ << CollationSpec::kStrengthField << "' is greater than "
<< static_cast<int>(CollationSpec::StrengthType::kSecondary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
return Status::OK();
@@ -711,8 +660,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
return {ErrorCodes::FailedToParse,
str::stream() << "If " << CollationSpec::kLocaleField << "="
<< CollationSpec::kSimpleBinaryComparison
- << ", no other fields should be present in: "
- << spec};
+ << ", no other fields should be present in: " << spec};
}
return {nullptr};
}
@@ -721,8 +669,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
auto userLocale = icu::Locale::createFromName(parsedLocaleID.getValue().c_str());
if (userLocale.isBogus()) {
return {ErrorCodes::BadValue,
- str::stream() << "Field '" << CollationSpec::kLocaleField << "' is not valid in: "
- << spec};
+ str::stream() << "Field '" << CollationSpec::kLocaleField
+ << "' is not valid in: " << spec};
}
// Construct an icu::Collator.
@@ -733,8 +681,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to create collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
Status localeValidationStatus = validateLocaleID(spec, parsedLocaleID.getValue(), *icuCollator);
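Every hunk in this file re-wraps one idiom: build the message with str::stream(), then brace-initialize the error state of a StatusWith. A minimal sketch with a hypothetical parser (parsePositiveInt is not in the file):

    StatusWith<int> parsePositiveInt(long long v) {
        if (v >= 1 && v <= std::numeric_limits<int>::max()) {
            return {static_cast<int>(v)};
        }
        // str::stream() accepts heterogeneous operands and converts to std::string.
        return {ErrorCodes::FailedToParse,
                str::stream() << "value must be a positive 32-bit integer. Got: " << v};
    }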
diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
index df6233398ba..9c540817ec7 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
@@ -39,9 +39,7 @@ namespace mongo {
namespace {
ServiceContext::ConstructorActionRegisterer registerIcuCollator{
- "CreateCollatorFactory",
- {"LoadICUData"},
- [](ServiceContext* service) {
+ "CreateCollatorFactory", {"LoadICUData"}, [](ServiceContext* service) {
CollatorFactoryInterface::set(service, stdx::make_unique<CollatorFactoryICU>());
}};
} // namespace
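The registerer takes a name, a list of prerequisite actions, and a constructor callback; the reformat packs the first two onto one line. The same shape with a hypothetical registration:

    ServiceContext::ConstructorActionRegisterer registerExampleFactory{
        "CreateExampleFactory", {"LoadICUData"}, [](ServiceContext* service) {
            // runs during ServiceContext construction, after the "LoadICUData" action
        }};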
diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
index 052e03decee..275ae55ff84 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
@@ -60,8 +60,7 @@ TEST(CollatorFactoryICUTest, SimpleLocaleWithOtherFieldsFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "simple"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -443,8 +442,7 @@ TEST(CollatorFactoryICUTest, CaseLevelFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << false));
+ << "caseLevel" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().caseLevel);
}
@@ -453,8 +451,7 @@ TEST(CollatorFactoryICUTest, CaseLevelTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().caseLevel);
}
@@ -496,8 +493,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -507,8 +503,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kSecondary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -518,8 +513,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kTertiary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -529,8 +523,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4));
+ << "strength" << 4));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kQuaternary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -540,8 +533,7 @@ TEST(CollatorFactoryICUTest, IdenticalStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 5));
+ << "strength" << 5));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kIdentical),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -551,8 +543,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << false));
+ << "numericOrdering" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().numericOrdering);
}
@@ -561,8 +552,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().numericOrdering);
}
@@ -615,8 +605,7 @@ TEST(CollatorFactoryICUTest, NormalizationFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << false));
+ << "normalization" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().normalization);
}
@@ -625,8 +614,7 @@ TEST(CollatorFactoryICUTest, NormalizationTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << true));
+ << "normalization" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().normalization);
}
@@ -635,8 +623,7 @@ TEST(CollatorFactoryICUTest, BackwardsFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << false));
+ << "backwards" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().backwards);
}
@@ -645,8 +632,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true));
+ << "backwards" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().backwards);
}
@@ -655,8 +641,7 @@ TEST(CollatorFactoryICUTest, LongStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1LL));
+ << "strength" << 1LL));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -666,8 +651,7 @@ TEST(CollatorFactoryICUTest, DoubleStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1.0));
+ << "strength" << 1.0));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -687,8 +671,7 @@ TEST(CollatorFactoryICUTest, NonStringCaseFirstFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseFirst"
- << 1));
+ << "caseFirst" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -717,8 +700,7 @@ TEST(CollatorFactoryICUTest, TooLargeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2147483648LL));
+ << "strength" << 2147483648LL));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -727,8 +709,7 @@ TEST(CollatorFactoryICUTest, FractionalStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 0.5));
+ << "strength" << 0.5));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::BadValue);
}
@@ -737,8 +718,7 @@ TEST(CollatorFactoryICUTest, NegativeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << -1));
+ << "strength" << -1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -747,8 +727,7 @@ TEST(CollatorFactoryICUTest, InvalidIntegerStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 6));
+ << "strength" << 6));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -767,8 +746,7 @@ TEST(CollatorFactoryICUTest, NonStringAlternateFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "alternate"
- << 1));
+ << "alternate" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -787,8 +765,7 @@ TEST(CollatorFactoryICUTest, NonStringMaxVariableFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "maxVariable"
- << 1));
+ << "maxVariable" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -845,8 +822,7 @@ TEST(CollatorFactoryICUTest, NonStringVersionFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "version"
- << 3));
+ << "version" << 3));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -878,8 +854,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCollatorIgnoresCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -891,8 +866,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthCollatorsIgnoresCaseButNotAccents)
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -904,8 +878,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthCollatorConsidersCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -917,10 +890,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true));
+ << "strength" << 1 << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -930,14 +900,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 1 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -947,14 +914,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
TEST(CollatorFactoryICUTest, TertiaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 3
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 3 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("A", "a"), 0);
}
@@ -971,8 +935,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("2", "10"), 0);
}
@@ -981,9 +944,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(collator.getValue()->compare("a b", "ab"), 0);
@@ -994,9 +955,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4
- << "alternate"
+ << "strength" << 4 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("a b", "ab"), 0);
@@ -1007,9 +966,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShiftedMaxVariableSpace) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"
<< "maxVariable"
<< "space"));
@@ -1022,8 +979,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsFalse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1034,10 +990,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2
- << "backwards"
- << true));
+ << "strength" << 2 << "backwards" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1068,10 +1021,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthOneFails) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 1));
+ << "backwards" << true << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1079,10 +1029,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthTwoSucceeds) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 2));
+ << "backwards" << true << "strength" << 2));
ASSERT_OK(collator.getStatus());
}
@@ -1092,8 +1039,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1103,8 +1049,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1114,10 +1059,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1127,10 +1069,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1140,8 +1079,7 @@ TEST(CollatorFactoryICUTest, CaseFirstOffWithStrengthOneSucceeds) {
<< "en_US"
<< "caseFirst"
<< "off"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1151,8 +1089,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1162,8 +1099,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1173,8 +1109,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1184,8 +1119,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
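For orientation, the strength values exercised above map onto ICU comparison levels; the observable contrast, restated from the PrimaryStrength and TertiaryStrength tests (illustrative code, not a new test; invariant() stands in for the ASSERT macros):

    CollatorFactoryICU factory;
    auto primary = factory.makeFromBSON(BSON("locale"
                                             << "en_US"
                                             << "strength" << 1));
    invariant(primary.getValue()->compare("a", "A") == 0);   // strength 1 ignores case
    auto tertiary = factory.makeFromBSON(BSON("locale"
                                              << "en_US"
                                              << "strength" << 3));
    invariant(tertiary.getValue()->compare("a", "A") != 0);  // strength 3 considers case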
diff --git a/src/mongo/db/query/collation/collator_interface_mock_test.cpp b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
index d792d95c2a1..340e9690ef6 100644
--- a/src/mongo/db/query/collation/collator_interface_mock_test.cpp
+++ b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
@@ -242,10 +242,12 @@ TEST(CollatorInterfaceMockSelfTest, BSONObjsEqualUnderCollatorHashEquallyNested)
SimpleBSONObjComparator bsonCmpConsiderCase;
BSONObjComparator bsonCmpIgnoreCase(
BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, &toLowerCollator);
- BSONObj obj1 = BSON("a" << 1 << "b" << BSON("c"
- << "foo"));
- BSONObj obj2 = BSON("a" << 1 << "b" << BSON("c"
- << "FOO"));
+ BSONObj obj1 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "foo"));
+ BSONObj obj2 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "FOO"));
ASSERT_NE(bsonCmpConsiderCase.hash(obj1), bsonCmpConsiderCase.hash(obj2));
ASSERT_EQ(bsonCmpIgnoreCase.hash(obj1), bsonCmpIgnoreCase.hash(obj2));
}
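Condensed restatement of the invariant checked above (toLowerCollator is the mock from the test; invariant() again stands in for the ASSERT macros):

    BSONObjComparator ignoreCase(
        BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, &toLowerCollator);
    // The collator applies inside nested objects too, so the hashes agree.
    invariant(ignoreCase.hash(BSON("b" << BSON("c"
                                               << "foo"))) ==
              ignoreCase.hash(BSON("b" << BSON("c"
                                               << "FOO"))));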
diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp
index c660bc6adec..b7ea431f678 100644
--- a/src/mongo/db/query/count_command_test.cpp
+++ b/src/mongo/db/query/count_command_test.cpp
@@ -50,8 +50,7 @@ TEST(CountCommandTest, ParserDealsWithMissingFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$lte" << 10)));
+ << "query" << BSON("a" << BSON("$lte" << 10)));
auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
@@ -70,15 +69,8 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
- << 100
- << "skip"
- << 1000
- << "hint"
- << BSON("b" << 5)
- << "collation"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit" << 100 << "skip"
+ << 1000 << "hint" << BSON("b" << 5) << "collation"
<< BSON("locale"
<< "en_US")
<< "readConcern"
@@ -89,8 +81,7 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 10000);
+ << "maxTimeMS" << 10000);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
@@ -110,8 +101,7 @@ TEST(CountCommandTest, ParsingNegativeLimitGivesPositiveLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << -100);
+ << "limit" << -100);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_EQ(countCmd.getLimit().get(), 100);
@@ -122,9 +112,7 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
<< std::numeric_limits<long long>::min());
ASSERT_THROWS_CODE(
@@ -132,31 +120,28 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
}
TEST(CountCommandTest, FailParseBadSkipValue) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "skip"
- << -1000)),
- AssertionException,
- ErrorCodes::FailedToParse);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "skip" << -1000)),
+ AssertionException,
+ ErrorCodes::FailedToParse);
}
TEST(CountCommandTest, FailParseBadCollationType) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "collation"
- << "en_US")),
- AssertionException,
- ErrorCodes::TypeMismatch);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "collation"
+ << "en_US")),
+ AssertionException,
+ ErrorCodes::TypeMismatch);
}
TEST(CountCommandTest, FailParseUnknownField) {
@@ -176,8 +161,7 @@ TEST(CountCommandTest, ConvertToAggregationWithHint) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "hint"
- << BSON("x" << 1));
+ << "hint" << BSON("x" << 1));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -198,12 +182,7 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryAndFilterAndLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << 200
- << "skip"
- << 300
- << "query"
- << BSON("x" << 7));
+ << "limit" << 200 << "skip" << 300 << "query" << BSON("x" << 7));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -227,9 +206,7 @@ TEST(CountCommandTest, ConvertToAggregationWithMaxTimeMS) {
auto countCmd = CountCommand::parse(ctxt,
BSON("count"
<< "TestColl"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "TestDB"));
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
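The conversion tests assume a count command rewrites to an equivalent aggregation; the presumed translation for the limit/skip/query case, inferred from the test expectations rather than quoted from the implementation:

    // count { query: {x: 7}, skip: 300, limit: 200 }  =>
    // { aggregate: "TestColl",
    //   pipeline: [ { $match: { x: 7 } }, { $skip: 300 }, { $limit: 200 },
    //               { $count: "count" } ],
    //   cursor: {} }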
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index f9fcf3c7af9..f62c57fe40f 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -175,24 +175,24 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
BSONElement cursorElt = cmdResponse[kCursorField];
if (cursorElt.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
- << cmdResponse};
+ str::stream() << "Field '" << kCursorField
+ << "' must be a nested object in: " << cmdResponse};
}
BSONObj cursorObj = cursorElt.Obj();
BSONElement idElt = cursorObj[kIdField];
if (idElt.type() != BSONType::NumberLong) {
- return {
- ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kIdField << "' must be of type long in: " << cmdResponse};
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kIdField
+ << "' must be of type long in: " << cmdResponse};
}
cursorId = idElt.Long();
BSONElement nsElt = cursorObj[kNsField];
if (nsElt.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kNsField << "' must be of type string in: "
- << cmdResponse};
+ str::stream() << "Field '" << kNsField
+ << "' must be of type string in: " << cmdResponse};
}
fullns = nsElt.String();
@@ -204,9 +204,7 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
if (batchElt.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
- << kBatchField
- << "' in: "
- << cmdResponse};
+ << kBatchField << "' in: " << cmdResponse};
}
batchObj = batchElt.Obj();
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 6a3a2229813..952edb125f4 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -41,13 +41,11 @@ namespace mongo {
namespace {
TEST(CursorResponseTest, parseFromBSONFirstBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -60,13 +58,11 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -79,13 +75,11 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(0) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(0) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -97,13 +91,11 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSONArrayBuilder().arr())
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSONArrayBuilder().arr())
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -113,15 +105,11 @@ TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
}
TEST(CursorResponseTest, parseFromBSONLatestOplogEntry) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSONArrayBuilder().arr())
- << "$_internalLatestOplogTimestamp"
- << Timestamp(1, 2)
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSONArrayBuilder().arr())
+ << "$_internalLatestOplogTimestamp" << Timestamp(1, 2) << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -146,8 +134,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -155,8 +142,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldWrongType) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -164,10 +150,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -177,10 +161,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldWrongType) {
<< "123"
<< "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -188,19 +170,16 @@ TEST(CursorResponseTest, parseFromBSONBatchFieldMissing) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll")
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONFirstBatchFieldWrongType) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -208,32 +187,25 @@ TEST(CursorResponseTest, parseFromBSONNextBatchFieldWrongType) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ << "nextBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONLatestOplogEntryWrongType) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1)))
- << "$_internalLatestOplogTimestamp"
- << 1
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1)))
+ << "$_internalLatestOplogTimestamp" << 1 << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONOkFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
ASSERT_NOT_OK(result.getStatus());
}
@@ -250,13 +222,11 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::InitialResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -264,13 +234,11 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -282,13 +250,11 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
response.addToBSON(CursorResponse::ResponseType::InitialResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -300,13 +266,11 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
response.addToBSON(CursorResponse::ResponseType::SubsequentResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -321,10 +285,7 @@ TEST(CursorResponseTest, serializeLatestOplogEntry) {
<< "db.coll"
<< "nextBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "$_internalLatestOplogTimestamp"
- << Timestamp(1, 2)
- << "ok"
- << 1));
+ << "$_internalLatestOplogTimestamp" << Timestamp(1, 2) << "ok" << 1));
auto reparsed = CursorResponse::parseFromBSON(serialized);
ASSERT_OK(reparsed.getStatus());
CursorResponse reparsedResponse = std::move(reparsed.getValue());
@@ -350,10 +311,8 @@ TEST(CursorResponseTest, serializePostBatchResumeToken) {
<< "db.coll"
<< "nextBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))
- << "postBatchResumeToken"
- << postBatchResumeToken)
- << "ok"
- << 1));
+ << "postBatchResumeToken" << postBatchResumeToken)
+ << "ok" << 1));
auto reparsed = CursorResponse::parseFromBSON(serialized);
ASSERT_OK(reparsed.getStatus());
CursorResponse reparsedResponse = std::move(reparsed.getValue());
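All fixtures above construct the same reply envelope; the shape being parsed, assembled from the fields these tests exercise:

    // { cursor: { id: <NumberLong>,
    //             ns: <string>,
    //             firstBatch | nextBatch: [ <documents> ],
    //             postBatchResumeToken: <obj, optional> },
    //   $_internalLatestOplogTimestamp: <Timestamp, optional>,
    //   ok: 1 }
    // id == 0 marks the cursor exhausted, so no getMore should follow.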
diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp
index 01397b1c605..8229dd2d13f 100644
--- a/src/mongo/db/query/datetime/date_time_support.cpp
+++ b/src/mongo/db/query/datetime/date_time_support.cpp
@@ -180,9 +180,7 @@ void TimeZoneDatabase::loadTimeZoneInfo(
40475,
{ErrorCodes::FailedToParse,
str::stream() << "failed to parse time zone file for time zone identifier \""
- << entry.id
- << "\": "
- << timelib_get_error_message(errorCode)});
+ << entry.id << "\": " << timelib_get_error_message(errorCode)});
}
invariant(errorCode == TIMELIB_ERROR_NO_ERROR);
@@ -276,8 +274,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
uasserted(ErrorCodes::ConversionFailure,
str::stream()
<< "an incomplete date/time string has been found, with elements missing: \""
- << dateString
- << "\"");
+ << dateString << "\"");
}
if (!tz.isUtcZone()) {
@@ -295,8 +292,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
ErrorCodes::ConversionFailure,
str::stream()
<< "you cannot pass in a date/time string with time zone information ('"
- << parsedTime.get()->tz_abbr
- << "') together with a timezone argument");
+ << parsedTime.get()->tz_abbr << "') together with a timezone argument");
break;
default: // should technically not be possible to reach
uasserted(ErrorCodes::ConversionFailure,
diff --git a/src/mongo/db/query/datetime/date_time_support.h b/src/mongo/db/query/datetime/date_time_support.h
index 94ac4c4d08e..f5efdcb8fc3 100644
--- a/src/mongo/db/query/datetime/date_time_support.h
+++ b/src/mongo/db/query/datetime/date_time_support.h
@@ -295,8 +295,7 @@ private:
uassert(18537,
str::stream() << "Could not convert date to string: date component was outside "
- << "the supported range of 0-9999: "
- << number,
+ << "the supported range of 0-9999: " << number,
(number >= 0) && (number <= 9999));
int digits = 1;
diff --git a/src/mongo/db/query/datetime/init_timezone_data.cpp b/src/mongo/db/query/datetime/init_timezone_data.cpp
index dea7322dd90..f2de36a65af 100644
--- a/src/mongo/db/query/datetime/init_timezone_data.cpp
+++ b/src/mongo/db/query/datetime/init_timezone_data.cpp
@@ -49,8 +49,7 @@ ServiceContext::ConstructorActionRegisterer loadTimeZoneDB{
if (!timeZoneDatabase) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "failed to load time zone database from path \""
- << serverGlobalParams.timeZoneInfoPath
- << "\"");
+ << serverGlobalParams.timeZoneInfoPath << "\"");
}
TimeZoneDatabase::set(service,
stdx::make_unique<TimeZoneDatabase>(std::move(timeZoneDatabase)));
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 8317fc50cfc..e6ad7cc0c5c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -246,4 +246,4 @@ private:
static void generateServerInfo(BSONObjBuilder* out);
};
-} // namespace
+} // namespace mongo
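The one-line fix above conforms the header to the closing-namespace-comment convention that clang-format enforces throughout this patch; the expected form, minimally:

    namespace mongo {
    namespace {
    // anonymous-namespace contents
    }  // namespace
    }  // namespace mongo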
diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp
index b9c771de18e..581252ffdfc 100644
--- a/src/mongo/db/query/explain_options.cpp
+++ b/src/mongo/db/query/explain_options.cpp
@@ -72,13 +72,10 @@ StatusWith<ExplainOptions::Verbosity> ExplainOptions::parseCmdBSON(const BSONObj
verbosity = Verbosity::kExecStats;
} else if (verbStr != kAllPlansExecutionVerbosityStr) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "verbosity string must be one of {'"
- << kQueryPlannerVerbosityStr
- << "', '"
- << kExecStatsVerbosityStr
- << "', '"
- << kAllPlansExecutionVerbosityStr
- << "'}");
+ str::stream()
+ << "verbosity string must be one of {'" << kQueryPlannerVerbosityStr
+ << "', '" << kExecStatsVerbosityStr << "', '"
+ << kAllPlansExecutionVerbosityStr << "'}");
}
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 7485d1a7260..9fc2b14cd22 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -337,8 +337,7 @@ Message getMore(OperationContext* opCtx,
// cursor.
uassert(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace " << ns << ", but cursor " << cursorid
- << " belongs to namespace "
- << cursorPin->nss().ns(),
+ << " belongs to namespace " << cursorPin->nss().ns(),
nss == cursorPin->nss());
// A user can only call getMore on their own cursor. If there were multiple users authenticated
diff --git a/src/mongo/db/query/find_and_modify_request.cpp b/src/mongo/db/query/find_and_modify_request.cpp
index 20f62d2a407..9bf40a1f456 100644
--- a/src/mongo/db/query/find_and_modify_request.cpp
+++ b/src/mongo/db/query/find_and_modify_request.cpp
@@ -171,18 +171,18 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto queryElement = cmdObj[kQueryField];
if (queryElement.type() != Object) {
return {ErrorCodes::Error(31160),
- str::stream() << "'" << kQueryField
- << "' parameter must be an object, found "
- << queryElement.type()};
+ str::stream()
+ << "'" << kQueryField << "' parameter must be an object, found "
+ << queryElement.type()};
}
query = queryElement.embeddedObject();
} else if (field == kSortField) {
auto sortElement = cmdObj[kSortField];
if (sortElement.type() != Object) {
return {ErrorCodes::Error(31174),
- str::stream() << "'" << kSortField
- << "' parameter must be an object, found "
- << sortElement.type()};
+ str::stream()
+ << "'" << kSortField << "' parameter must be an object, found "
+ << sortElement.type()};
}
sort = sortElement.embeddedObject();
} else if (field == kRemoveField) {
@@ -195,9 +195,9 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto projectionElement = cmdObj[kFieldProjectionField];
if (projectionElement.type() != Object) {
return {ErrorCodes::Error(31175),
- str::stream() << "'" << kFieldProjectionField
- << "' parameter must be an object, found "
- << projectionElement.type()};
+ str::stream()
+ << "'" << kFieldProjectionField
+ << "' parameter must be an object, found " << projectionElement.type()};
}
fields = projectionElement.embeddedObject();
} else if (field == kUpsertField) {
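Each branch of this parser repeats one guard; a hypothetical helper capturing it (requireObject is not in the file, and the numeric error codes stay caller-supplied as in the hunks):

    StatusWith<BSONObj> requireObject(const BSONObj& cmdObj, StringData field, int code) {
        auto elt = cmdObj[field];
        if (elt.type() != Object) {
            return {ErrorCodes::Error(code),
                    str::stream() << "'" << field << "' parameter must be an object, found "
                                  << elt.type()};
        }
        return elt.embeddedObject();
    }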
diff --git a/src/mongo/db/query/find_and_modify_request.h b/src/mongo/db/query/find_and_modify_request.h
index a8b350e691f..a5212570755 100644
--- a/src/mongo/db/query/find_and_modify_request.h
+++ b/src/mongo/db/query/find_and_modify_request.h
@@ -117,13 +117,13 @@ public:
//
/**
- * Sets the filter to find a document.
- */
+ * Sets the filter to find a document.
+ */
void setQuery(BSONObj query);
/**
- * Sets the update object that specifies how a document gets updated.
- */
+ * Sets the update object that specifies how a document gets updated.
+ */
void setUpdateObj(BSONObj updateObj);
/**
@@ -134,8 +134,8 @@ public:
void setShouldReturnNew(bool shouldReturnNew);
/**
- * Sets a flag whether the statement performs an upsert.
- */
+ * Sets a flag whether the statement performs an upsert.
+ */
void setUpsert(bool upsert);
//
@@ -210,4 +210,4 @@ private:
// Holds value when performing an update request and none when a remove request.
boost::optional<write_ops::UpdateModification> _update;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 54ade9343eb..6b3273792c0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -765,8 +765,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
bool permitYield,
size_t plannerOptions) {
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- auto yieldPolicy = (permitYield && (readConcernArgs.getLevel() !=
- repl::ReadConcernLevel::kSnapshotReadConcern))
+ auto yieldPolicy =
+ (permitYield &&
+ (readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern))
? PlanExecutor::YIELD_AUTO
: PlanExecutor::INTERRUPT_ONLY;
return _getExecutorFind(
@@ -1512,10 +1513,11 @@ QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx,
const IndexCatalogEntry* ice = ii->next();
const IndexDescriptor* desc = ice->descriptor();
if (desc->keyPattern().hasField(parsedDistinct.getKey())) {
- if (!mayUnwindArrays && isAnyComponentOfPathMultikey(desc->keyPattern(),
- desc->isMultikey(opCtx),
- desc->getMultikeyPaths(opCtx),
- parsedDistinct.getKey())) {
+ if (!mayUnwindArrays &&
+ isAnyComponentOfPathMultikey(desc->keyPattern(),
+ desc->isMultikey(opCtx),
+ desc->getMultikeyPaths(opCtx),
+ parsedDistinct.getKey())) {
// If the caller requested "strict" distinct that does not "pre-unwind" arrays,
// then an index which is multikey on the distinct field may not be used. This is
// because when indexing an array each element gets inserted individually. Any plan
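The first hunk in this file re-wraps, without changing, the yield-policy selection; an annotated restatement (comments added here, logic as in the hunk):

    auto yieldPolicy =
        (permitYield &&
         (readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern))
        ? PlanExecutor::YIELD_AUTO       // may release locks and the storage snapshot
        : PlanExecutor::INTERRUPT_ONLY;  // snapshot reads must keep their snapshot, so
                                         // only interrupt checks are allowed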
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 16cdf77016a..4e6350630ee 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -189,14 +189,13 @@ TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesByName) {
- testAllowedIndices(
- {buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
- buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
- // BSONObjSet default constructor is explicit, so we cannot copy-list-initialize until
- // C++14.
- SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
- {"a_1"},
- {"a_1"});
+ testAllowedIndices({buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
+ buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
+ // BSONObjSet default constructor is explicit, so we cannot
+ // copy-list-initialize until C++14.
+ SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
+ {"a_1"},
+ {"a_1"});
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesMultipleIndexesByKey) {
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index e577671f2fd..e78f6e4e37c 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -84,8 +84,7 @@ Status GetMoreRequest::isValid() const {
if (batchSize && *batchSize <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Batch size for getMore must be positive, "
- << "but received: "
- << *batchSize);
+ << "but received: " << *batchSize);
}
return Status::OK();
@@ -116,8 +115,8 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (fieldName == kCollectionField) {
if (el.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'collection' must be of type string in: "
- << cmdObj};
+ str::stream()
+ << "Field 'collection' must be of type string in: " << cmdObj};
}
BSONElement collElt = cmdObj["collection"];
@@ -155,9 +154,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (!isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '"
- << fieldName
- << "'."};
+ << "Unrecognized field '" << fieldName << "'."};
}
}
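Condensed sketch of the validation re-wrapped above (hypothetical free function; the real check lives in GetMoreRequest::isValid):

    Status validateBatchSize(boost::optional<long long> batchSize) {
        if (batchSize && *batchSize <= 0) {
            return Status(ErrorCodes::BadValue,
                          str::stream() << "Batch size for getMore must be positive, "
                                        << "but received: " << *batchSize);
        }
        return Status::OK();
    }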
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index f9fe0627cbe..78b235153f8 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -61,8 +61,7 @@ TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
BSON("getMore"
<< "not a number"
- << "collection"
- << 123));
+ << "collection" << 123));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
}
@@ -117,8 +116,7 @@ TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "unknown_field"
- << 1));
+ << "unknown_field" << 1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
}
@@ -128,8 +126,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << -1));
+ << "batchSize" << -1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -139,8 +136,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 0));
+ << "batchSize" << 0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -161,8 +157,7 @@ TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 200));
+ << "batchSize" << 200));
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
ASSERT(result.getValue().batchSize);
@@ -186,8 +181,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMS) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 100));
+ << "maxTimeMS" << 100));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT(result.getValue().awaitDataTimeout);
@@ -200,8 +194,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMSOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 0));
+ << "maxTimeMS" << 0));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
@@ -216,8 +209,7 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99);
+ << "batchSize" << 99);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -240,10 +232,7 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1);
+ << "batchSize" << 99 << "term" << 1);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -255,14 +244,11 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
1,
repl::OpTime(Timestamp(0, 10), 2));
BSONObj requestObj = request.toBSON();
- BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
- << "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1
- << "lastKnownCommittedOpTime"
- << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
+ BSONObj expectedRequest =
+ BSON("getMore" << CursorId(123) << "collection"
+ << "testcoll"
+ << "batchSize" << 99 << "term" << 1 << "lastKnownCommittedOpTime"
+ << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -276,8 +262,7 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
diff --git a/src/mongo/db/query/killcursors_request.cpp b/src/mongo/db/query/killcursors_request.cpp
index df44d73043d..5f21b82d489 100644
--- a/src/mongo/db/query/killcursors_request.cpp
+++ b/src/mongo/db/query/killcursors_request.cpp
@@ -67,8 +67,8 @@ StatusWith<KillCursorsRequest> KillCursorsRequest::parseFromBSON(const std::stri
if (cmdObj[kCursorsField].type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kCursorsField << "' must be of type array in: "
- << cmdObj};
+ str::stream() << "Field '" << kCursorsField
+ << "' must be of type array in: " << cmdObj};
}
std::vector<CursorId> cursorIds;
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index fef544d0b42..d1cdb1f4650 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -95,8 +95,7 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldNotArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << CursorId(123)));
+ << "cursors" << CursorId(123)));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -106,21 +105,18 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldEmptyArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << BSONArrayBuilder().arr()));
+ << "cursors" << BSONArrayBuilder().arr()));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
}
TEST(KillCursorsRequestTest, parseFromBSONCursorFieldContainsEltOfWrongType) {
- StatusWith<KillCursorsRequest> result =
- KillCursorsRequest::parseFromBSON("db",
- BSON("killCursors"
- << "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << "foo"
- << CursorId(456))));
+ StatusWith<KillCursorsRequest> result = KillCursorsRequest::parseFromBSON(
+ "db",
+ BSON("killCursors"
+ << "coll"
+ << "cursors" << BSON_ARRAY(CursorId(123) << "foo" << CursorId(456))));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -132,8 +128,7 @@ TEST(KillCursorsRequestTest, toBSON) {
BSONObj requestObj = request.toBSON();
BSONObj expectedObj = BSON("killCursors"
<< "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << CursorId(456)));
+ << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
ASSERT_BSONOBJ_EQ(requestObj, expectedObj);
}
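
For readers unfamiliar with these test helpers: BSON() and BSON_ARRAY() are stream-style macros that build a BSONObj or BSONArray in place, and the reformat keeps consecutive << operands on one line where they fit. A sketch of the object the final hunk constructs:

    BSONObj cmd = BSON("killCursors"
                       << "coll"
                       << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
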
diff --git a/src/mongo/db/query/killcursors_response.cpp b/src/mongo/db/query/killcursors_response.cpp
index 798b2bf8cb0..8b482772b59 100644
--- a/src/mongo/db/query/killcursors_response.cpp
+++ b/src/mongo/db/query/killcursors_response.cpp
@@ -51,8 +51,8 @@ Status fillOutCursorArray(const BSONObj& cmdResponse,
if (elt.type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << fieldName << "' must be of type array in: "
- << cmdResponse};
+ str::stream() << "Field '" << fieldName
+ << "' must be of type array in: " << cmdResponse};
}
for (BSONElement cursorElt : elt.Obj()) {
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index c0c5da3f278..8f091635bb4 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -41,13 +41,9 @@ namespace {
TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0));
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0));
ASSERT_OK(result.getStatus());
KillCursorsResponse response = result.getValue();
ASSERT_EQ(response.cursorsKilled.size(), 1U);
@@ -65,11 +61,8 @@ TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
TEST(KillCursorsResponseTest, parseFromBSONSuccessOmitCursorsAlive) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsUnknown"
- << BSON_ARRAY(CursorId(789))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsUnknown"
+ << BSON_ARRAY(CursorId(789)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -84,13 +77,11 @@ TEST(KillCursorsResponseTest, parseFromBSONCommandNotOk) {
}
TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
- StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
- BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << "foobar"
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "ok"
- << 1.0));
+ StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(BSON(
+ "cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
+ << "foobar"
+ << "cursorsAlive" << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
+ << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -98,11 +89,8 @@ TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
TEST(KillCursorsResponseTest, parseFromBSONArrayContainsInvalidElement) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -116,13 +104,9 @@ TEST(KillCursorsResponseTest, toBSON) {
BSONObj responseObj = response.toBSON();
BSONObj expectedResponse =
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0);
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 2d5a74af4c0..b5420ecaf3d 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -292,11 +292,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto readConcernElt = cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
if (readConcernElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(readConcernElt.type()));
+ str::stream()
+ << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(readConcernElt.type()));
}
qr->setReadConcern(readConcernElt.embeddedObject());
}
@@ -304,11 +303,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto queryOptionsElt = cmdObj[QueryRequest::kUnwrappedReadPrefField]) {
if (queryOptionsElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << QueryRequest::kUnwrappedReadPrefField
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(queryOptionsElt.type()));
+ str::stream()
+ << "\"" << QueryRequest::kUnwrappedReadPrefField
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(queryOptionsElt.type()));
}
qr->setUnwrappedReadPref(queryOptionsElt.embeddedObject());
}
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index bf48d19439e..dd6e501ed24 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -73,10 +73,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -113,23 +113,21 @@ TEST(ParsedDistinctTest, ConvertToAggregationDottedPathNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y.z"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$match" << BSON("x" << BSON("$_internalSchemaType"
<< "object")
<< "x.y"
<< BSON("$_internalSchemaType"
<< "object"))),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x.y.z")))};
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x.y.z")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -159,9 +157,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "testdb"),
ExtensionsCallbackNoop(),
!isExplain);
@@ -190,10 +186,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -232,10 +228,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
BSON("$match" << BSON("z" << 7)),
BSON("$unwind" << BSON("path"
<< "$y"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$y")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$y")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -269,10 +265,10 @@ TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
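
Beyond the reflow, these expected-pipeline literals document the translation under test: a distinct on field x desugars to an $unwind that preserves null and empty arrays, followed by a $group that collects values with $addToSet. The fixtures assert against exactly this pipeline:

    std::vector<BSONObj> expectedPipeline{
        BSON("$unwind" << BSON("path"
                               << "$x"
                               << "preserveNullAndEmptyArrays" << true)),
        BSON("$group" << BSON("_id" << BSONNULL << "distinct"
                                    << BSON("$addToSet"
                                            << "$x")))};
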
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index aaa3bd36f3d..359ad5c23d8 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -34,8 +34,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
/**
* Parses the projection 'spec' and checks its validity with respect to the query 'query'.
@@ -297,9 +297,9 @@ Status ParsedProjection::make(OperationContext* opCtx,
// $meta sortKey should not be checked as a part of _requiredFields, since it can
// potentially produce a covered projection as long as the sort key is covered.
if (BSONType::Object == elt.type()) {
- dassert(
- SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() == BSON("$meta"
- << "sortKey")));
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() ==
+ BSON("$meta"
+ << "sortKey")));
continue;
}
if (elt.trueValue()) {
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index 84669166c8d..dc00e1c86ee 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -38,8 +38,8 @@
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
using namespace mongo;
@@ -62,8 +62,7 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const
Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 7687ed1dca6..553b8002232 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -91,7 +91,7 @@ bool nodeIsConservativelySupportedBySparseIndex(const MatchExpression* me) {
const bool inElemMatch = false;
return QueryPlannerIXSelect::nodeIsSupportedBySparseIndex(me, inElemMatch);
}
-}
+} // namespace
void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName,
const BSONObj& keyPattern) {
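
Several hunks in this patch do nothing but add the namespace marker that the reformat expects after a closing brace: anonymous namespaces get a bare `// namespace`, named ones repeat the name. The convention in miniature:

    namespace mongo {
    namespace {
    int helper() { return 0; }  // internal linkage
    }  // namespace
    }  // namespace mongo
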
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index d4d91dfe7f9..48116f58416 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -47,8 +47,8 @@ std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 5614137b90a..8507ab4707f 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -1337,8 +1337,7 @@ TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
TEST_F(CachePlanSelectionTest, AndWithinPolygonWithinCenterSphere) {
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
"a_2dsphere_b_2dsphere");
BSONObj query = fromjson(
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index 7c6d498007c..910b306502d 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -41,10 +41,10 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::endl;
using std::set;
using std::string;
+using std::unique_ptr;
using std::vector;
std::string getPathPrefix(std::string path) {
@@ -668,9 +668,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
// multikey information.
invariant(INDEX_2DSPHERE == thisIndex.type);
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is on the leading field of 'thisIndex'. We assign it to
// 'thisIndex' and skip assigning any other predicates on the leading field to
// 'thisIndex' because no additional predicate on the leading field will generate a
@@ -722,9 +722,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
}
} else if (thisIndex.multikey) {
// Special handling for multikey mandatory indices.
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is over the first field of the index. Assign
// it now.
indexAssign.preds.push_back(mandatoryPred);
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 1e8e84da6a7..3c4b601d1aa 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -46,9 +46,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index c35bb3cbdfb..07e1532dc60 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -682,13 +682,14 @@ void QueryPlannerIXSelect::_rateIndices(MatchExpression* node,
const IndexEntry& index = indices[i];
std::size_t keyPatternIndex = 0;
for (auto&& keyPatternElt : index.keyPattern) {
- if (keyPatternElt.fieldNameStringData() == fullPath && _compatible(keyPatternElt,
- index,
- keyPatternIndex,
- node,
- fullPath,
- collator,
- elemMatchCtx)) {
+ if (keyPatternElt.fieldNameStringData() == fullPath &&
+ _compatible(keyPatternElt,
+ index,
+ keyPatternIndex,
+ node,
+ fullPath,
+ collator,
+ elemMatchCtx)) {
if (keyPatternIndex == 0) {
rt->first.push_back(i);
} else {
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index e80eddd187b..e1018a87944 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -51,8 +51,8 @@ namespace {
constexpr CollatorInterface* kSimpleCollator = nullptr;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
/**
@@ -1131,8 +1131,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseHashedIndex) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
auto entry = buildSimpleIndexEntry(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices;
@@ -1143,10 +1142,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseTextIndexSuffix) {
auto entry = buildSimpleIndexEntry(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices = {0};
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 655a6816194..54c6a0b9fb0 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -58,8 +58,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
namespace dps = ::mongo::dotted_path_support;
@@ -520,8 +520,8 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
auto soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
if (!soln) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to analyze plan from cache. Query: "
- << query.toStringShort());
+ str::stream()
+ << "Failed to analyze plan from cache. Query: " << query.toStringShort());
}
LOG(5) << "Planner: solution constructed from the cache:\n" << redact(soln->toString());
@@ -610,11 +610,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
if (fullIndexList.size() > 1) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Hint matched multiple indexes, "
- << "must hint by index name. Matched: "
- << fullIndexList[0].toString()
- << " and "
- << fullIndexList[1].toString());
+ str::stream()
+ << "Hint matched multiple indexes, "
+ << "must hint by index name. Matched: " << fullIndexList[0].toString()
+ << " and " << fullIndexList[1].toString());
}
hintedIndexEntry.emplace(fullIndexList.front());
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index c70ec258481..b23c40a64fe 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -89,8 +89,7 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
TEST_F(QueryPlannerTest, Basic2DCompound) {
addIndex(BSON("loc"
<< "2d"
- << "a"
- << 1));
+ << "a" << 1));
runQuery(
fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
@@ -247,8 +246,7 @@ TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
TEST_F(QueryPlannerTest, 2DNonNearContainedOr) {
addIndex(BSON("a"
<< "2d"
- << "x"
- << 1));
+ << "x" << 1));
addIndex(BSON("y" << 1));
runQuery(
fromjson("{$and: [{x: 1}, {$or: [{a: {$within: {$polygon: [[0, 0], [0, 1], [1, 0], [0, "
@@ -649,10 +647,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
// true means multikey
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"
- << "c"
- << 1
- << "d"
- << 1),
+ << "c" << 1 << "d" << 1),
true);
runQuery(
fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
@@ -671,8 +666,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
// true means multikey
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
true);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
@@ -1163,10 +1157,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{1U}, {1U}, {1U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1196,10 +1187,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1230,10 +1218,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {b: 2, c: 3}}}"));
@@ -1265,10 +1250,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U, 1U}, {0U, 1U}, {0U, 1U}};
addIndex(BSON("a.b.geo"
<< "2dsphere"
- << "a.b.c"
- << 1
- << "a.b.d"
- << 1),
+ << "a.b.c" << 1 << "a.b.d" << 1),
multikeyPaths);
runQuery(fromjson("{'a.b.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {'b.c': 2, 'b.d': 3}}}"));
@@ -1432,8 +1414,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDNearCompound) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo"
<< "2dsphere"
- << "nongeo"
- << 1)};
+ << "nongeo" << 1)};
BSONObj predicate = fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}");
testMultiple2dsphereIndexVersions(versions, keyPatterns, predicate, 1U);
}
@@ -1444,16 +1425,10 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowOr) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo1"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1),
+ << "a" << 1 << "b" << 1),
BSON("geo2"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1)};
+ << "a" << 1 << "b" << 1)};
BSONObj predicate = fromjson(
"{a: 4, b: 5, $or: ["
@@ -1475,8 +1450,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowElemMatch) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("a.b"
<< "2dsphere"
- << "a.c"
- << 1)};
+ << "a.c" << 1)};
BSONObj predicate = fromjson(
"{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
@@ -1600,8 +1574,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1616,8 +1589,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
@@ -1632,8 +1604,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldHandledCorr
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1647,8 +1618,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
@@ -1661,8 +1631,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
assertNumSolutions(1U);
@@ -1673,8 +1642,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
@@ -1687,8 +1655,7 @@ TEST_F(QueryPlannerTest, 2dGeoWithinWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{a: {$within: {$polygon: [[0,0], [2,0], [4,0]]}}, b: {$_internalExprEq: 2}}"));
@@ -1745,8 +1712,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{b: {$_internalExprEq: 0}, a: {$geoWithin: {$centerSphere: [[0, 0], 10]}}}"));
@@ -1767,8 +1733,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingFieldMulti
const bool multikey = true;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(
@@ -1791,8 +1756,7 @@ TEST_F(QueryPlannerTest, 2dWithinPredicateOverTrailingFieldElemMatchMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}"));
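
A side note on these geo fixtures: index metadata arrives as trailing positional arguments to addIndex, so the inline "true means multikey" comments carry real information through the reflow. Naming the flag, as some call sites above already do, keeps the intent visible:

    const bool multikey = true;  // clearer than a bare 'true' argument
    addIndex(BSON("a"
                  << "2d"
                  << "b" << 1),
             multikey);
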
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 8d944b29eb9..4a182e5a27e 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -434,7 +434,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseIndex) {
addIndex(BSON("x" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -449,7 +449,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -462,7 +462,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullInElemMatchValueSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{'x': {$elemMatch: {$ne: null}}}"));
@@ -1674,8 +1674,7 @@ TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
addIndex(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
ASSERT_EQUALS(getNumSolutions(), 1U);
@@ -2744,7 +2743,7 @@ TEST_F(QueryPlannerTest, NegationCannotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$ne: 5}}"));
assertHasOnlyCollscan();
@@ -2758,7 +2757,7 @@ TEST_F(QueryPlannerTest, NegationInElemMatchDoesNotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$elemMatch: {$ne: 5}}}"));
assertHasOnlyCollscan();
@@ -2770,7 +2769,7 @@ TEST_F(QueryPlannerTest, SparseIndexCannotSupportEqualsNull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$eq: null}}"));
assertHasOnlyCollscan();
@@ -2784,7 +2783,7 @@ TEST_F(QueryPlannerTest, SparseIndexCanSupportGTEOrLTENull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$gte: null}}"));
assertNumSolutions(1U);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 59306ff1feb..14251a98af0 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -548,8 +548,8 @@ std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index d0b148349ca..ed4b1e45247 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -52,8 +52,7 @@ using namespace mongo;
TEST_F(QueryPlannerTest, SimpleText) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah'}}"));
assertNumSolutions(1);
@@ -65,8 +64,7 @@ TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1}"));
// No table scans allowed so there is no solution.
@@ -79,8 +77,7 @@ TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -99,8 +96,7 @@ TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
@@ -113,8 +109,7 @@ TEST_F(QueryPlannerTest, PrefixOnTextIndexIsOutsidePred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("b" << 1));
runInvalidQuery(fromjson("{$and: [{a: 5}, {$or: [{$text: {$search: 'blah'}}, {b: 6}]}]}"));
}
@@ -124,8 +119,7 @@ TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Both points.
runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
@@ -150,10 +144,7 @@ TEST_F(QueryPlannerTest, SuffixOptional) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -168,10 +159,7 @@ TEST_F(QueryPlannerTest, RemoveFromSubtree) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -187,8 +175,7 @@ TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
true);
// Both points.
@@ -201,10 +188,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
// 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
// index to satisfy it w/o the text query.
@@ -215,10 +199,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -228,8 +209,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
assertNumSolutions(1U);
@@ -242,8 +222,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPr
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -257,8 +236,7 @@ TEST_F(QueryPlannerTest, TextInsideOrBasic) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -274,8 +252,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
"{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
@@ -294,8 +271,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
@@ -316,8 +292,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
addIndex(BSON("b" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{a: 1, $or: [{a:2}, {b:2}, "
"{a: 1, $text: {$search: 'foo'}}]}"));
@@ -336,8 +311,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
@@ -360,8 +334,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(0);
@@ -374,8 +347,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
"{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
@@ -390,8 +362,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
"{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
@@ -405,8 +376,7 @@ TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
TEST_F(QueryPlannerTest, OrTextExact) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
@@ -421,8 +391,7 @@ TEST_F(QueryPlannerTest, OrTextExact) {
TEST_F(QueryPlannerTest, OrTextInexactCovered) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
@@ -437,8 +406,7 @@ TEST_F(QueryPlannerTest, OrTextInexactCovered) {
TEST_F(QueryPlannerTest, TextCaseSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
assertNumSolutions(1);
@@ -448,8 +416,7 @@ TEST_F(QueryPlannerTest, TextCaseSensitive) {
TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $diacriticSensitive: true}}"));
assertNumSolutions(1);
@@ -459,8 +426,7 @@ TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
TEST_F(QueryPlannerTest, SortKeyMetaProjectionWithTextScoreMetaSort) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(fromjson("{$text: {$search: 'foo'}}"),
fromjson("{a: {$meta: 'textScore'}}"),
@@ -477,8 +443,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
const bool multikey = true;
addIndex(BSON("a.x" << 1 << "a.y" << 1 << "b.x" << 1 << "b.y" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{'a.x': 1, 'a.y': 2, 'b.x': 3, 'b.y': 4, $text: {$search: 'foo'}}"));
@@ -491,8 +456,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -504,8 +468,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -517,10 +480,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -533,10 +493,7 @@ TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectlyM
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1),
+ << "_ftsx" << 1 << "b" << 1),
multikey);
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -550,8 +507,7 @@ TEST_F(QueryPlannerTest, ExprEqCannotUsePrefixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
}
@@ -560,10 +516,7 @@ TEST_F(QueryPlannerTest, ExprEqCanUseSuffixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
runQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index d0fd0def30e..eba458736af 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -901,8 +901,7 @@ TEST_F(QueryPlannerWildcardTest, WildcardIndexDoesNotSupplyCandidatePlanForTextS
addWildcardIndex(BSON("$**" << 1));
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Confirm that the wildcard index generates candidate plans for queries which do not include a
// $text predicate.
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index c43317b584b..e0082a90d80 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -399,9 +399,7 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
} else if (!isGenericArgument(fieldName)) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '"
- << fieldName
- << "'.");
+ << "Unrecognized field '" << fieldName << "'.");
}
}
@@ -645,26 +643,26 @@ Status QueryRequest::validate() const {
if (_limit && *_limit < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Limit value must be non-negative, but received: "
- << *_limit);
+ str::stream()
+ << "Limit value must be non-negative, but received: " << *_limit);
}
if (_batchSize && *_batchSize < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "BatchSize value must be non-negative, but received: "
- << *_batchSize);
+ str::stream()
+ << "BatchSize value must be non-negative, but received: " << *_batchSize);
}
if (_ntoreturn && *_ntoreturn < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "NToReturn value must be non-negative, but received: "
- << *_ntoreturn);
+ str::stream()
+ << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
}
if (_maxTimeMS < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "MaxTimeMS value must be non-negative, but received: "
- << _maxTimeMS);
+ str::stream()
+ << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
}
if (_tailableMode != TailableModeEnum::kNormal) {
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index ed4d369602b..f7cc73a1419 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1485,5 +1485,5 @@ TEST_F(QueryRequestTest, ParseFromUUID) {
ASSERT_EQ(nss, qr.nss());
}
-} // namespace mongo
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/query_settings_test.cpp b/src/mongo/db/query/query_settings_test.cpp
index 41cb1cc0c3d..6a6d0dce66f 100644
--- a/src/mongo/db/query/query_settings_test.cpp
+++ b/src/mongo/db/query/query_settings_test.cpp
@@ -42,9 +42,9 @@
using mongo::AllowedIndicesFilter;
using mongo::BSONObj;
+using mongo::fromjson;
using mongo::IndexEntry;
using mongo::SimpleBSONObjComparator;
-using mongo::fromjson;
namespace {
TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByName) {
@@ -113,4 +113,4 @@ TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByKeyPattern) {
ASSERT_TRUE(filter.allows(a_idx));
ASSERT_FALSE(filter.allows(ab_idx));
}
-}
+} // namespace
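
The reordering of using-declarations here, and in the planner and projection files above, is also mechanical: the formatter sorts them case-insensitively, so mongo::fromjson now lands between mongo::BSONObj and mongo::IndexEntry instead of after mongo::SimpleBSONObjComparator. In miniature:

    using mongo::BSONObj;
    using mongo::fromjson;    // 'f' sorts before 'I' case-insensitively
    using mongo::IndexEntry;
    using mongo::SimpleBSONObjComparator;
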
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 23c83c6fb8a..20fe9824450 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -154,7 +154,7 @@ void addEqualityFieldSorts(const BSONObj& sortPattern,
sortsOut->insert(prefixBob.obj());
}
}
-}
+} // namespace
string QuerySolutionNode::toString() const {
str::stream ss;
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index 7ac47cb2aad..ec3821b7bef 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -727,8 +727,7 @@ auto createMatchExprAndParsedProjection(const BSONObj& query, const BSONObj& pro
ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.getValue().get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return std::make_pair(std::move(queryMatchExpr.getValue()),
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 012efb8a262..02056023010 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -94,10 +94,9 @@ PlanStage* buildStages(OperationContext* opCtx,
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, ixn->index.identifier.catalogName);
invariant(descriptor,
- str::stream() << "Namespace: " << collection->ns() << ", CanonicalQuery: "
- << cq.toStringShort()
- << ", IndexEntry: "
- << ixn->index.toString());
+ str::stream() << "Namespace: " << collection->ns()
+ << ", CanonicalQuery: " << cq.toStringShort()
+ << ", IndexEntry: " << ixn->index.toString());
// We use the node's internal name, keyPattern and multikey details here. For $**
// indexes, these may differ from the information recorded in the index's descriptor.
diff --git a/src/mongo/db/read_concern.h b/src/mongo/db/read_concern.h
index 7bd7594e143..c9ac7f08e1c 100644
--- a/src/mongo/db/read_concern.h
+++ b/src/mongo/db/read_concern.h
@@ -42,7 +42,7 @@ enum class PrepareConflictBehavior;
namespace repl {
class ReadConcernArgs;
class SpeculativeMajorityReadInfo;
-}
+} // namespace repl
/**
* Given the specified read concern arguments, performs checks that the read concern can actually be
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 7844f28ebea..ea270fef283 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -29,7 +29,6 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-#include "mongo/db/read_concern.h"
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -37,6 +36,7 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/read_concern.h"
#include "mongo/db/read_concern_mongod_gen.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
@@ -168,10 +168,9 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- BSON("appendOplogNote" << 1 << "maxClusterTime" << clusterTime.asTimestamp()
- << "data"
- << BSON("noop write for afterClusterTime read concern"
- << 1)),
+ BSON("appendOplogNote"
+ << 1 << "maxClusterTime" << clusterTime.asTimestamp() << "data"
+ << BSON("noop write for afterClusterTime read concern" << 1)),
Shard::RetryPolicy::kIdempotent);
status = swRes.getStatus();
std::get<1>(myWriteRequest)->set(status);
@@ -295,8 +294,7 @@ MONGO_REGISTER_SHIM(waitForReadConcern)
<< " value must not be greater than the current clusterTime. "
"Requested clusterTime: "
<< targetClusterTime->toString()
- << "; current clusterTime: "
- << currentTime.toString()};
+ << "; current clusterTime: " << currentTime.toString()};
}
auto status = makeNoopWriteIfNeeded(opCtx, *targetClusterTime);
diff --git a/src/mongo/db/read_concern_test.cpp b/src/mongo/db/read_concern_test.cpp
index df078b59aca..50ff8761aeb 100644
--- a/src/mongo/db/read_concern_test.cpp
+++ b/src/mongo/db/read_concern_test.cpp
@@ -48,9 +48,7 @@ using ReadConcernTest = ReplCoordTest;
TEST_F(ReadConcernTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 3ac2f9c6a06..092857b8a81 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -98,10 +98,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
return Status(
ErrorCodes::CannotCreateIndex,
str::stream()
- << "Cannot rebuild index "
- << spec
- << ": "
- << keyStatus.reason()
+ << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
}
}
@@ -126,7 +123,7 @@ Status rebuildIndexesOnCollection(OperationContext* opCtx,
return swRebuild.getStatus();
}
- auto[numRecords, dataSize] = swRebuild.getValue();
+ auto [numRecords, dataSize] = swRebuild.getValue();
auto rs = collection->getRecordStore();
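
One hunk here touches syntax rather than line breaks: `auto[numRecords, dataSize]` gains a space, the layout the formatter applies to C++17 structured bindings. A self-contained sketch:

    #include <utility>

    std::pair<long long, long long> countAndSize() {
        return {42, 4096};  // hypothetical record count and data size
    }

    int main() {
        auto [numRecords, dataSize] = countAndSize();  // 'auto [..]', not 'auto[..]'
        return (numRecords > 0 && dataSize > 0) ? 0 : 1;
    }
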
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index b3ffa50a846..dcd9b7cf3ba 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -243,9 +243,9 @@ bool hasReplSetConfigDoc(OperationContext* opCtx) {
}
/**
-* Check that the oplog is capped, and abort the process if it is not.
-* Caller must lock DB before calling this function.
-*/
+ * Check that the oplog is capped, and abort the process if it is not.
+ * Caller must lock DB before calling this function.
+ */
void checkForCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
@@ -283,15 +283,13 @@ void rebuildIndexes(OperationContext* opCtx, StorageEngine* storageEngine) {
fassert(40590,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto& indexesToRebuild = swIndexSpecs.getValue();
invariant(indexesToRebuild.first.size() == 1 && indexesToRebuild.second.size() == 1,
str::stream() << "Num Index Names: " << indexesToRebuild.first.size()
- << " Num Index Objects: "
- << indexesToRebuild.second.size());
+ << " Num Index Objects: " << indexesToRebuild.second.size());
auto& ino = nsToIndexNameObjMap[collNss.ns()];
ino.first.emplace_back(std::move(indexesToRebuild.first.back()));
ino.second.emplace_back(std::move(indexesToRebuild.second.back()));
@@ -515,8 +513,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< swVersion.getStatus()
<< "). If the current featureCompatibilityVersion is below "
"4.0, see the documentation on upgrading at "
- << feature_compatibility_version_documentation::kUpgradeLink
- << ".",
+ << feature_compatibility_version_documentation::kUpgradeLink << ".",
swVersion.isOK());
fcvDocumentExists = true;
@@ -535,8 +532,9 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< startupWarningsLog;
log() << "** To fix this, use the setFeatureCompatibilityVersion "
<< "command to resume upgrade to 4.2." << startupWarningsLog;
- } else if (version == ServerGlobalParams::FeatureCompatibility::Version::
- kDowngradingTo40) {
+ } else if (version ==
+ ServerGlobalParams::FeatureCompatibility::Version::
+ kDowngradingTo40) {
log() << "** WARNING: A featureCompatibilityVersion downgrade did not "
<< "complete. " << startupWarningsLog;
log() << "** The current featureCompatibilityVersion is "
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 181f2f5ef69..1b99507fc5c 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -189,16 +189,15 @@ Status AbstractAsyncComponent::_scheduleWorkAtAndSaveHandle_inlock(
const std::string& name) {
invariant(handle);
if (_isShuttingDown_inlock()) {
- return Status(
- ErrorCodes::CallbackCanceled,
- str::stream() << "failed to schedule work " << name << " at " << when.toString() << ": "
- << _componentName
- << " is shutting down");
+ return Status(ErrorCodes::CallbackCanceled,
+ str::stream()
+ << "failed to schedule work " << name << " at " << when.toString() << ": "
+ << _componentName << " is shutting down");
}
auto result = _executor->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
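
The second hunk reflows a Status::withContext call; withContext prefixes additional context onto an existing error while preserving its code, which is why the failure path can annotate and return the scheduler's own status. The shape in isolation, mirroring the lines above (the wrapper function is hypothetical):

    Status wrapScheduleError(const Status& original, const std::string& name, Date_t when) {
        return original.withContext(str::stream() << "failed to schedule work " << name
                                                  << " at " << when.toString());
    }
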
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 8d5e784b591..64d88ad41e8 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -247,8 +247,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
component.reset();
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to start up " << componentToStartUp << ": "
- << _componentName
- << " is shutting down");
+ << _componentName << " is shutting down");
}
auto status = component->startup();
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
index f3d44242ffb..882cf5f4fa8 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
@@ -135,4 +135,4 @@ executor::RemoteCommandRequest AbstractOplogFetcherTest::processNetworkResponse(
}
} // namespace repl
-} // namespace mango
+} // namespace mongo
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
index 2164f93cac6..7349689bb32 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
@@ -97,4 +97,4 @@ protected:
Date_t lastFetchedWall;
};
} // namespace repl
-} // namespace mango
+} // namespace mongo
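Several hunks in this commit, like the two fixture files above, only correct the trailing comments on closing namespace braces (including a stray "mango"). The convention being enforced is simply:

    namespace mongo {
    namespace repl {

    // ... declarations ...

    }  // namespace repl
    }  // namespace mongo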
diff --git a/src/mongo/db/repl/applier_helpers.cpp b/src/mongo/db/repl/applier_helpers.cpp
index ef92ed6c52d..1672585a071 100644
--- a/src/mongo/db/repl/applier_helpers.cpp
+++ b/src/mongo/db/repl/applier_helpers.cpp
@@ -196,8 +196,7 @@ StatusWith<InsertGroup::ConstIterator> InsertGroup::groupAndApplyInserts(ConstIt
// application of an individual op.
auto status = exceptionToStatus().withContext(
str::stream() << "Error applying inserts in bulk: " << redact(groupedInsertObj)
- << ". Trying first insert as a lone insert: "
- << redact(entry.raw));
+ << ". Trying first insert as a lone insert: " << redact(entry.raw));
// It's not an error during initial sync to encounter DuplicateKey errors.
if (Mode::kInitialSync == _mode && ErrorCodes::DuplicateKey == status) {
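withContext, used above to prefix the bulk-insert error before each document is retried alone, can be reduced to a standalone sketch. This is a simplified stand-in for mongo::Status, for illustration only; the real class formats the combined reason differently:

    #include <iostream>
    #include <string>

    // Simplified stand-in for mongo::Status::withContext: prepend extra
    // context to the reason while keeping the same error classification.
    struct Status {
        bool ok = true;
        std::string reason;

        Status withContext(const std::string& context) const {
            return {ok, context + " :: " + reason};
        }
    };

    int main() {
        Status cause{false, "duplicate key"};
        Status wrapped = cause.withContext("Error applying inserts in bulk");
        std::cout << wrapped.reason << '\n';  // context first, then the cause
        return 0;
    }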
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 9b3d0b50632..a562679e63d 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -159,9 +159,7 @@ Status _applyOps(OperationContext* opCtx,
ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply insert or update operation on a non-existent namespace "
- << nss.ns()
- << " in atomic applyOps mode: "
- << redact(opObj));
+ << nss.ns() << " in atomic applyOps mode: " << redact(opObj));
}
// Reject malformed operations in an atomic applyOps.
@@ -171,8 +169,7 @@ Status _applyOps(OperationContext* opCtx,
uasserted(ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply a malformed operation in atomic applyOps mode: "
- << redact(opObj)
- << "; will retry without atomicity: "
+ << redact(opObj) << "; will retry without atomicity: "
<< exceptionToStatus().toString());
}
@@ -231,9 +228,7 @@ Status _applyOps(OperationContext* opCtx,
str::stream()
<< "cannot apply insert or update operation on a "
"non-existent namespace "
- << nss.ns()
- << ": "
- << mongo::redact(opObj));
+ << nss.ns() << ": " << mongo::redact(opObj));
}
OldClientContext ctx(opCtx, nss.ns());
diff --git a/src/mongo/db/repl/apply_ops.h b/src/mongo/db/repl/apply_ops.h
index c5cca31569f..8aac61a39b9 100644
--- a/src/mongo/db/repl/apply_ops.h
+++ b/src/mongo/db/repl/apply_ops.h
@@ -116,7 +116,7 @@ Status applyOps(OperationContext* opCtx,
/**
* Applies a non-transactional 'applyOps' oplog entry. That is, an 'applyOps' entry that was not
* generated by a transaction.
-*/
+ */
Status applyApplyOpsOplogEntry(OperationContext* opCtx,
const OplogEntry& entry,
repl::OplogApplication::Mode oplogApplicationMode);
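The tests in the next file assemble nested applyOps documents by hand with the BSON builder macros. As a sketch of the shape they target — assuming an in-tree MongoDB build where mongo/bson/bsonobjbuilder.h and the BSON/BSON_ARRAY macros are available — a helper like the following (hypothetical, not part of the source) produces a one-insert applyOps command:

    #include "mongo/bson/bsonobjbuilder.h"  // assumes the in-tree MongoDB build

    namespace mongo {
    // Hypothetical helper: wraps a single insert op for namespace 'ns' in an
    // applyOps command, i.e. {applyOps: [{op: "i", ns: <ns>, o: <doc>}]}.
    BSONObj makeSingleInsertApplyOps(const std::string& ns, const BSONObj& doc) {
        BSONObj insertOp = BSON("op"
                                << "i"
                                << "ns" << ns << "o" << doc);
        return BSON("applyOps" << BSON_ARRAY(insertOp));
    }
    }  // namespace mongo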
diff --git a/src/mongo/db/repl/apply_ops_test.cpp b/src/mongo/db/repl/apply_ops_test.cpp
index 13575d81fb0..2f38aa54a5f 100644
--- a/src/mongo/db/repl/apply_ops_test.cpp
+++ b/src/mongo/db/repl/apply_ops_test.cpp
@@ -141,17 +141,13 @@ TEST_F(ApplyOpsTest, CommandInNestedApplyOpsReturnsSuccess) {
auto mode = OplogApplication::Mode::kApplyOpsCmd;
BSONObjBuilder resultBuilder;
NamespaceString nss("test", "foo");
- auto innerCmdObj = BSON("op"
- << "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll()));
+ auto innerCmdObj =
+ BSON("op"
+ << "c"
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()));
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -169,18 +165,13 @@ TEST_F(ApplyOpsTest, InsertInNestedApplyOpsReturnsSuccess) {
NamespaceString nss("test", "foo");
auto innerCmdObj = BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
+ << "ns" << nss.ns() << "o"
<< BSON("_id"
<< "a")
- << "ui"
- << options.uuid.get());
+ << "ui" << options.uuid.get());
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -206,18 +197,10 @@ BSONObj makeApplyOpsWithInsertOperation(const NamespaceString& nss,
const BSONObj& documentToInsert) {
auto insertOp = uuid ? BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert
- << "ui"
- << *uuid)
+ << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid)
: BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert);
+ << "ns" << nss.ns() << "o" << documentToInsert);
return BSON("applyOps" << BSON_ARRAY(insertOp));
}
@@ -395,53 +378,35 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
auto ui1 = UUID::gen();
auto op1 = BSON("op"
<< "i"
- << "ns"
- << ns1.ns()
- << "ui"
- << ui1
- << "o"
- << BSON("_id" << 1));
+ << "ns" << ns1.ns() << "ui" << ui1 << "o" << BSON("_id" << 1));
NamespaceString ns2("test.b");
auto ui2 = UUID::gen();
auto op2 = BSON("op"
<< "i"
- << "ns"
- << ns2.ns()
- << "ui"
- << ui2
- << "o"
- << BSON("_id" << 2));
+ << "ns" << ns2.ns() << "ui" << ui2 << "o" << BSON("_id" << 2));
NamespaceString ns3("test.c");
auto ui3 = UUID::gen();
auto op3 = BSON("op"
<< "u"
- << "ns"
- << ns3.ns()
- << "ui"
- << ui3
- << "b"
- << true
- << "o"
- << BSON("x" << 1)
- << "o2"
- << BSON("_id" << 3));
+ << "ns" << ns3.ns() << "ui" << ui3 << "b" << true << "o" << BSON("x" << 1)
+ << "o2" << BSON("_id" << 3));
auto oplogEntry =
makeOplogEntry(OpTypeEnum::kCommand, BSON("applyOps" << BSON_ARRAY(op1 << op2 << op3)));
auto operations = ApplyOps::extractOperations(oplogEntry);
- ASSERT_EQUALS(3U, operations.size()) << "Unexpected number of operations extracted: "
- << oplogEntry.toBSON();
+ ASSERT_EQUALS(3U, operations.size())
+ << "Unexpected number of operations extracted: " << oplogEntry.toBSON();
// Check extracted CRUD operations.
auto it = operations.cbegin();
{
ASSERT(operations.cend() != it);
const auto& operation1 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation1.getOpType()) << "Unexpected op type: "
- << operation1.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation1.getOpType())
+ << "Unexpected op type: " << operation1.toBSON();
ASSERT_EQUALS(ui1, *operation1.getUuid());
ASSERT_EQUALS(ns1, operation1.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), operation1.getOperationToApply());
@@ -453,8 +418,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation2 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation2.getOpType()) << "Unexpected op type: "
- << operation2.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation2.getOpType())
+ << "Unexpected op type: " << operation2.toBSON();
ASSERT_EQUALS(ui2, *operation2.getUuid());
ASSERT_EQUALS(ns2, operation2.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), operation2.getOperationToApply());
@@ -466,8 +431,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation3 = *(it++);
- ASSERT(OpTypeEnum::kUpdate == operation3.getOpType()) << "Unexpected op type: "
- << operation3.toBSON();
+ ASSERT(OpTypeEnum::kUpdate == operation3.getOpType())
+ << "Unexpected op type: " << operation3.toBSON();
ASSERT_EQUALS(ui3, *operation3.getUuid());
ASSERT_EQUALS(ns3, operation3.getNss());
ASSERT_BSONOBJ_EQ(BSON("x" << 1), operation3.getOperationToApply());
@@ -495,9 +460,7 @@ TEST_F(ApplyOpsTest, ApplyOpsFailsToDropAdmin) {
auto dropDatabaseOp = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("dropDatabase" << 1));
auto dropDatabaseCmdObj = BSON("applyOps" << BSON_ARRAY(dropDatabaseOp));
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 359f6a2c4a2..6d7918a7f5c 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -47,8 +47,7 @@ const HostAndPort BaseClonerTest::target("localhost", -1);
const NamespaceString BaseClonerTest::nss("db.coll");
const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns());
+ << "ns" << nss.ns());
// static
BSONObj BaseClonerTest::createCountResponse(int documentCount) {
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index 7e64afae171..194bf202b8f 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -215,17 +215,17 @@ private:
ReplicationProcess* _replicationProcess;
/**
- * All member variables are labeled with one of the following codes indicating the
- * synchronization rules for accessing them:
- *
- * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
- * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
- * thread.
- *
- * (S) Self-synchronizing; access in any way from any context.
- *
- * (M) Reads and writes guarded by _mutex
- *
+ * All member variables are labeled with one of the following codes indicating the
+ * synchronization rules for accessing them:
+ *
+ * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
+ * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
+ * thread.
+ *
+ * (S) Self-synchronizing; access in any way from any context.
+ *
+ * (M) Reads and writes guarded by _mutex
+ *
*/
// Protects member data of BackgroundSync.
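The comment block above documents a labeling convention rather than code, so a small self-contained sketch may help: a member tagged (M) is only read or written under _mutex, as below (illustrative class, not the real BackgroundSync):

    #include <mutex>

    class Component {
    public:
        void setState(int s) {
            std::lock_guard<std::mutex> lk(_mutex);
            _state = s;  // (M) write guarded by _mutex
        }
        int getState() const {
            std::lock_guard<std::mutex> lk(_mutex);
            return _state;  // (M) read guarded by _mutex
        }

    private:
        // Protects member data of this component.
        mutable std::mutex _mutex;
        int _state = 0;  // (M)
    };

An (S) member would be a type that synchronizes internally (an atomic, say), and a (PR) member is simply never touched off the owning thread.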
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index 7f6f6af9672..8f2dfc40664 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/repl/scatter_gather_algorithm.h"
#include "mongo/db/repl/scatter_gather_runner.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/server_options.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -199,8 +198,8 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
Status hbStatus = hbResp.initialize(resBSON, 0, /*requireWallTime*/ false);
if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
- std::string message = str::stream() << "Our set name did not match that of "
- << request.target.toString();
+ std::string message = str::stream()
+ << "Our set name did not match that of " << request.target.toString();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 89e352b3ae6..a80a9160896 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -47,18 +47,18 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
namespace mongo {
@@ -140,30 +140,24 @@ ReplSetConfig assertMakeRSConfig(const BSONObj& configBson) {
}
TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_OK(waitForQuorumCheck());
}
TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
getExecutor().shutdown();
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, waitForQuorumCheck());
}
@@ -172,23 +166,20 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
// In this test, "we" are host "h3:1". All other nodes time out on
// their heartbeat request, and so the quorum check for initiate
// will fail because some members were unavailable.
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1")
- << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
startQuorumCheck(config, 2);
getNet()->enterNetwork();
const Date_t startDate = getNet()->now();
@@ -254,11 +245,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -282,8 +269,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), makeHeartbeatResponse(rsConfig, Milliseconds(8)));
}
@@ -301,19 +288,12 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "h3:1")
<< BSON("_id" << 4 << "host"
@@ -335,8 +315,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
@@ -368,11 +348,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -396,8 +372,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h4", 1)) {
getNet()->scheduleResponse(
noi,
@@ -433,11 +409,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -448,8 +420,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
<< "h4:1")
<< BSON("_id" << 5 << "host"
<< "h5:1"))
- << "settings"
- << BSON("replicaSetId" << replicaSetId)));
+ << "settings" << BSON("replicaSetId" << replicaSetId)));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -466,8 +437,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == incompatibleHost) {
OpTime opTime{Timestamp{10, 10}, 10};
Date_t wallTime = Date_t();
@@ -498,10 +469,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
ASSERT_REASON_CONTAINS(status,
str::stream() << "Our replica set ID of " << replicaSetId
- << " did not match that of "
- << incompatibleHost.toString()
- << ", which is "
- << unexpectedId);
+ << " did not match that of " << incompatibleHost.toString()
+ << ", which is " << unexpectedId);
ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
@@ -517,11 +486,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -545,8 +510,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -581,11 +546,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -609,8 +570,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -641,11 +602,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -665,8 +622,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
long long configVersion = 5;
getNet()->scheduleResponse(
@@ -695,11 +652,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -719,8 +672,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi,
@@ -753,11 +706,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -766,16 +715,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -789,8 +732,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -819,11 +762,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -832,12 +771,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "priority"
- << 0))));
+ << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -851,8 +788,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -877,11 +814,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -890,16 +823,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -913,8 +840,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -937,11 +864,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -961,8 +884,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
getNet()->scheduleResponse(
noi,
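ASSERT_REASON_CONTAINS and its negation, reformatted near the top of this file's hunks, both use the classic do { ... } while (false) wrapper. A self-contained sketch of why — with an illustrative macro, not the test framework's:

    #include <iostream>
    #include <string>

    // The do/while(false) wrapper turns a multi-statement macro into a single
    // statement, so it nests safely under an unbraced if/else and still
    // requires the trailing semicolon at the call site.
    #define EXPECT_CONTAINS(HAYSTACK, PATTERN)                                    \
        do {                                                                      \
            const std::string h_ = (HAYSTACK);                                    \
            if (h_.find(PATTERN) == std::string::npos)                            \
                std::cerr << #HAYSTACK " does not contain " << (PATTERN) << '\n'; \
        } while (false)

    int main() {
        std::string reason = "Our set name did not match that of h2:1";
        if (!reason.empty())
            EXPECT_CONTAINS(reason, "did not match");
        else
            std::cout << "empty reason\n";
        return 0;
    }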
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index a19dc5c6c8d..68f760d284a 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -75,42 +75,41 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() {
}
Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndexSpecs) {
- return _runTaskReleaseResourcesOnFailure(
- [ coll = _autoColl->getCollection(), &secondaryIndexSpecs, this ]()->Status {
- // All writes in CollectionBulkLoaderImpl should be unreplicated.
- // The opCtx is accessed indirectly through _secondaryIndexesBlock.
- UnreplicatedWritesBlock uwb(_opCtx.get());
- // This enforces the buildIndexes setting in the replica set configuration.
- auto indexCatalog = coll->getIndexCatalog();
- auto specs =
- indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
- if (specs.size()) {
- _secondaryIndexesBlock->ignoreUniqueConstraint();
- auto status =
- _secondaryIndexesBlock
- ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _secondaryIndexesBlock.reset();
+ return _runTaskReleaseResourcesOnFailure([coll = _autoColl->getCollection(),
+ &secondaryIndexSpecs,
+ this]() -> Status {
+ // All writes in CollectionBulkLoaderImpl should be unreplicated.
+ // The opCtx is accessed indirectly through _secondaryIndexesBlock.
+ UnreplicatedWritesBlock uwb(_opCtx.get());
+ // This enforces the buildIndexes setting in the replica set configuration.
+ auto indexCatalog = coll->getIndexCatalog();
+ auto specs = indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
+ if (specs.size()) {
+ _secondaryIndexesBlock->ignoreUniqueConstraint();
+ auto status =
+ _secondaryIndexesBlock
+ ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
- if (!_idIndexSpec.isEmpty()) {
- auto status =
- _idIndexBlock
- ->init(
- _opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _idIndexBlock.reset();
+ } else {
+ _secondaryIndexesBlock.reset();
+ }
+ if (!_idIndexSpec.isEmpty()) {
+ auto status =
+ _idIndexBlock
+ ->init(_opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
+ } else {
+ _idIndexBlock.reset();
+ }
- return Status::OK();
- });
+ return Status::OK();
+ });
}
Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::const_iterator begin,
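The hunk above reflows a lambda with init-captures and an explicit -> Status return passed to _runTaskReleaseResourcesOnFailure. The control flow reduces to the sketch below (invented Status and Loader types, illustration only): run the task, and release resources before returning if it failed:

    #include <functional>
    #include <iostream>
    #include <string>

    struct Status {
        bool ok = true;
        std::string reason;
    };

    class Loader {
    public:
        Status runTaskReleaseResourcesOnFailure(const std::function<Status()>& task) {
            Status s = task();
            if (!s.ok) {
                releaseResources();  // undo partial work before reporting failure
            }
            return s;
        }

    private:
        void releaseResources() {
            std::cout << "resources released after failure\n";
        }
    };

    int main() {
        Loader loader;
        bool shuttingDown = true;
        // Init-capture plus explicit '-> Status' return, as in the hunk above.
        Status s = loader.runTaskReleaseResourcesOnFailure(
            [flag = shuttingDown]() -> Status {
                if (flag) {
                    return {false, "shutting down"};
                }
                return {};
            });
        std::cout << (s.ok ? "ok" : s.reason) << '\n';
        return 0;
    }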
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index e86e8ce022c..cf3d0b63bde 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -132,8 +132,8 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
_sourceNss.db().toString(),
makeCommandWithUUIDorCollectionName("listIndexes", _options.uuid, sourceNss),
[this](const Fetcher::QueryResponseStatus& fetchResult,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listIndexesCallback(fetchResult, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -332,9 +332,7 @@ void CollectionCloner::_countCallback(
_finishCallback(countStatus.withContext(
str::stream() << "There was an error parsing document count from count "
"command result on collection "
- << _sourceNss.ns()
- << " from "
- << _source.toString()));
+ << _sourceNss.ns() << " from " << _source.toString()));
return;
}
}
@@ -343,8 +341,7 @@ void CollectionCloner::_countCallback(
_finishCallback({ErrorCodes::BadValue,
str::stream() << "Count call on collection " << _sourceNss.ns() << " from "
<< _source.toString()
- << " returned negative document count: "
- << count});
+ << " returned negative document count: " << count});
return;
}
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 20c4eb00ae3..09e61df9080 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -261,15 +261,15 @@ void CollectionClonerTest::setUp() {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& nonIdIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- Status result = localLoader->init(nonIdIndexSpecs);
- if (!result.isOK())
- return result;
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ Status result = localLoader->init(nonIdIndexSpecs);
+ if (!result.isOK())
+ return result;
- _loader = localLoader.get();
+ _loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
_server = std::make_unique<MockRemoteDBServer>(target.toString());
_server->assignCollectionUuid(nss.ns(), *options.uuid);
_client = new FailableMockDBClientConnection(_server.get(), getNet());
@@ -283,12 +283,10 @@ void CollectionClonerTest::setUp() {
std::vector<BSONObj> CollectionClonerTest::makeSecondaryIndexSpecs(const NamespaceString& nss) {
return {BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
<< "a_1"
- << "ns"
- << nss.ns()),
+ << "ns" << nss.ns()),
BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
<< "b_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
}
void CollectionClonerTest::tearDown() {
@@ -443,8 +441,7 @@ TEST_F(CollectionClonerTest, CollectionClonerPassesThroughCommandStatusErrorFrom
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "count error"
- << "code"
- << int(ErrorCodes::OperationFailed)));
+ << "code" << int(ErrorCodes::OperationFailed)));
}
collectionCloner->join();
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus());
@@ -566,15 +563,15 @@ TEST_F(CollectionClonerNoAutoIndexTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& theIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
- auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- collNss = theNss;
- collOptions = theOptions;
- collIndexSpecs = theIndexSpecs;
- const auto status = loader->init(theIndexSpecs);
- if (!status.isOK())
- return status;
- return std::move(loader);
- };
+ auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ collNss = theNss;
+ collOptions = theOptions;
+ collIndexSpecs = theIndexSpecs;
+ const auto status = loader->init(theIndexSpecs);
+ if (!status.isOK())
+ return status;
+ return std::move(loader);
+ };
const BSONObj doc = BSON("_id" << 1);
_server->insert(nss.ns(), doc);
@@ -633,13 +630,14 @@ TEST_F(CollectionClonerTest, ListIndexesReturnedNamespaceNotFound) {
bool collectionCreated = false;
bool writesAreReplicatedOnOpCtx = false;
NamespaceString collNss;
- storageInterface->createCollFn = [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
- OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
- writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
- collectionCreated = true;
- collNss = nss;
- return Status::OK();
- };
+ storageInterface->createCollFn =
+ [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
+ OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
+ writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
+ collectionCreated = true;
+ collNss = nss;
+ return Status::OK();
+ };
// Using a non-zero cursor to ensure that
// the cloner stops the fetcher from retrieving more results.
{
@@ -688,9 +686,9 @@ TEST_F(CollectionClonerTest,
// status.
auto exec = &getExecutor();
collectionCloner->setScheduleDbWorkFn_forTest([exec](
- executor::TaskExecutor::CallbackFn workFn) {
+ executor::TaskExecutor::CallbackFn workFn) {
auto wrappedTask = [workFn = std::move(workFn)](
- const executor::TaskExecutor::CallbackArgs& cbd) {
+ const executor::TaskExecutor::CallbackArgs& cbd) {
workFn(executor::TaskExecutor::CallbackArgs(
cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.opCtx));
};
@@ -698,8 +696,9 @@ TEST_F(CollectionClonerTest,
});
bool collectionCreated = false;
- storageInterface->createCollFn = [&collectionCreated](
- OperationContext*, const NamespaceString& nss, const CollectionOptions&) {
+ storageInterface->createCollFn = [&collectionCreated](OperationContext*,
+ const NamespaceString& nss,
+ const CollectionOptions&) {
collectionCreated = true;
return Status::OK();
};
@@ -1402,8 +1401,7 @@ TEST_F(CollectionClonerRenamedBeforeStartTest, BeginCollectionWithUUID) {
BSONObj expectedIdIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << alternateNss.ns());
+ << "ns" << alternateNss.ns());
ASSERT_BSONOBJ_EQ(collIdIndexSpec, expectedIdIndexSpec);
auto expectedNonIdIndexSpecs = makeSecondaryIndexSpecs(alternateNss);
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 267428f3831..1d1f3dda338 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -117,8 +117,8 @@ DatabaseCloner::DatabaseCloner(executor::TaskExecutor* executor,
_dbname,
createListCollectionsCommandObject(_listCollectionsFilter),
[=](const StatusWith<Fetcher::QueryResponse>& result,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listCollectionsCallback(result, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -263,9 +263,8 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
BSONObjBuilder* getMoreBob) {
if (!result.isOK()) {
_finishCallback(result.getStatus().withContext(
- str::stream() << "Error issuing listCollections on db '" << _dbname << "' (host:"
- << _source.toString()
- << ")"));
+ str::stream() << "Error issuing listCollections on db '" << _dbname
+ << "' (host:" << _source.toString() << ")"));
return;
}
@@ -311,12 +310,11 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
for (auto&& info : _collectionInfos) {
BSONElement nameElement = info.getField(kNameFieldName);
if (nameElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kNameFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kNameFieldName << "' "
+ << "field : " << info});
return;
}
if (nameElement.type() != mongo::String) {
@@ -332,29 +330,24 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
{ErrorCodes::Error(51005),
str::stream()
<< "collection info contains duplicate collection name "
- << "'"
- << collectionName
- << "': "
- << info});
+ << "'" << collectionName << "': " << info});
return;
}
BSONElement optionsElement = info.getField(kOptionsFieldName);
if (optionsElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kOptionsFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kOptionsFieldName << "' "
+ << "field : " << info});
return;
}
if (!optionsElement.isABSONObj()) {
_finishCallback_inlock(lk,
Status(ErrorCodes::TypeMismatch,
str::stream() << "'" << kOptionsFieldName
- << "' field must be an object: "
- << info));
+ << "' field must be an object: " << info));
return;
}
const BSONObj optionsObj = optionsElement.Obj();
@@ -426,8 +419,8 @@ void DatabaseCloner::_collectionClonerCallback(const Status& status, const Names
// Record failure, but do not return just yet, in case we want to do some logging.
if (!status.isOK()) {
- collStatus = status.withContext(
- str::stream() << "Error cloning collection '" << nss.toString() << "'");
+ collStatus = status.withContext(str::stream()
+ << "Error cloning collection '" << nss.toString() << "'");
}
// Forward collection cloner result to caller.
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 66948a482ef..25fc845b088 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -126,16 +126,16 @@ void DatabaseClonerTest::setUp() {
const BSONObj& idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- const auto collInfo = &_collections[nss];
+ const auto collInfo = &_collections[nss];
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
}
void DatabaseClonerTest::tearDown() {
@@ -335,8 +335,7 @@ TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "unknown operator"
- << "code"
- << ErrorCodes::BadValue));
+ << "code" << ErrorCodes::BadValue));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -391,16 +390,13 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()),
+ << "options" << _options2.toBSON()),
BSON("name"
<< "c"
- << "options"
- << _options3.toBSON())};
+ << "options" << _options3.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(
@@ -425,12 +421,10 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
@@ -512,8 +506,7 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< ""
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -534,12 +527,10 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "a"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
}
ASSERT_EQUALS(51005, getStatus().code());
@@ -575,11 +566,11 @@ TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << 123))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << 123))));
}
ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
@@ -596,12 +587,11 @@ TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSON("storageEngine" << 1)))));
+ processNetworkResponse(createListCollectionsResponse(
+ 0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSON("storageEngine" << 1)))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -617,11 +607,11 @@ TEST_F(DatabaseClonerTest, InvalidMissingUUID) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(50953, getStatus().code());
@@ -670,11 +660,11 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -700,8 +690,7 @@ TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
@@ -732,12 +721,10 @@ TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -763,8 +750,7 @@ TEST_F(DatabaseClonerTest, ShutdownCancelsCollectionCloning) {
0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())))));
+ << "options" << _options1.toBSON())))));
net->runReadyNetworkOperations();
// CollectionCloner sends collection count request on startup.
@@ -797,12 +783,10 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
@@ -818,8 +802,7 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
processNetworkResponse(createCountResponse(0));
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "fake message"
- << "code"
- << ErrorCodes::CursorNotFound));
+ << "code" << ErrorCodes::CursorNotFound));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -854,12 +837,10 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index c13154b179a..a631fff5dbc 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -177,19 +177,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
-
- return std::move(localLoader);
- };
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
+
+ return std::move(localLoader);
+ };
_dbWorkThreadPool.startup();
_target = HostAndPort{"local:1234"};
@@ -924,13 +924,13 @@ TEST_F(DBsClonerTest, SingleDatabaseCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -957,13 +957,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}, {name:'b'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options1.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options1.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -974,13 +974,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
<< ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
// listCollections for "b"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "b.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "b"
- << "options"
- << options2.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "b.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "b"
+ << "options" << options2.toBSON()))))},
// count:b
{"count", BSON("n" << 2 << "ok" << 1)},
// listIndexes:b
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index 19c9507e981..a69b2c07e71 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/dbcheck.h"
-#include "mongo/db/repl/dbcheck.h"
#include "mongo/db/repl/dbcheck_gen.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/optime.h"
@@ -132,7 +131,7 @@ std::unique_ptr<HealthLogEntry> dbCheckHealthLogEntry(const NamespaceString& nss
entry->setData(data);
return entry;
}
-}
+} // namespace
/**
* Get an error message if the check fails.
@@ -161,14 +160,9 @@ std::unique_ptr<HealthLogEntry> dbCheckBatchEntry(const NamespaceString& nss,
const repl::OpTime& optime) {
auto hashes = expectedFound(expectedHash, foundHash);
- auto data =
- BSON("success" << true << "count" << count << "bytes" << bytes << "md5" << hashes.second
- << "minKey"
- << minKey.elem()
- << "maxKey"
- << maxKey.elem()
- << "optime"
- << optime);
+ auto data = BSON("success" << true << "count" << count << "bytes" << bytes << "md5"
+ << hashes.second << "minKey" << minKey.elem() << "maxKey"
+ << maxKey.elem() << "optime" << optime);
auto severity = hashes.first ? SeverityEnum::Info : SeverityEnum::Error;
std::string msg =
@@ -284,19 +278,9 @@ std::unique_ptr<HealthLogEntry> dbCheckCollectionEntry(const NamespaceString& ns
std::string msg =
"dbCheck collection " + (match ? std::string("consistent") : std::string("inconsistent"));
auto data = BSON("success" << true << "uuid" << uuid.toString() << "found" << true << "name"
- << names.second
- << "prev"
- << prevs.second
- << "next"
- << nexts.second
- << "indexes"
- << indices.second
- << "options"
- << options.second
- << "md5"
- << md5s.second
- << "optime"
- << optime);
+ << names.second << "prev" << prevs.second << "next" << nexts.second
+ << "indexes" << indices.second << "options" << options.second
+ << "md5" << md5s.second << "optime" << optime);
return dbCheckHealthLogEntry(nss, severity, msg, OplogEntriesEnum::Collection, data);
}
@@ -520,7 +504,7 @@ Status dbCheckDatabaseOnSecondary(OperationContext* opCtx,
return Status::OK();
}
-}
+} // namespace
namespace repl {
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index dde6de369b8..457087a9365 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -228,5 +228,5 @@ Status dbCheckOplogCommand(OperationContext* opCtx,
const repl::OplogEntry& entry,
OplogApplication::Mode mode,
boost::optional<Timestamp> stableTimestampForRecovery);
-}
-}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/dbcheck_idl.h b/src/mongo/db/repl/dbcheck_idl.h
index c49bff7a5b1..9e2d9c880e6 100644
--- a/src/mongo/db/repl/dbcheck_idl.h
+++ b/src/mongo/db/repl/dbcheck_idl.h
@@ -91,4 +91,4 @@ private:
explicit BSONKey(const BSONElement& elem);
BSONObj _obj;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 7f0f50787c9..d9d6f7bf2ce 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -126,9 +126,7 @@ Status _doTxn(OperationContext* opCtx,
uasserted(ErrorCodes::NamespaceNotFound,
str::stream() << "cannot apply insert, delete, or update operation on a "
"non-existent namespace "
- << nss->ns()
- << ": "
- << mongo::redact(opObj));
+ << nss->ns() << ": " << mongo::redact(opObj));
}
if (opObj.hasField("ui")) {
@@ -155,9 +153,7 @@ Status _doTxn(OperationContext* opCtx,
if (!collection) {
uasserted(ErrorCodes::NamespaceNotFound,
str::stream() << "cannot apply operation on a non-existent namespace "
- << nss->ns()
- << " with doTxn: "
- << redact(opObj));
+ << nss->ns() << " with doTxn: " << redact(opObj));
}
// Setting alwaysUpsert to true makes sense only during oplog replay, and doTxn commands
diff --git a/src/mongo/db/repl/do_txn_test.cpp b/src/mongo/db/repl/do_txn_test.cpp
index 3b92a944ea0..561579a069c 100644
--- a/src/mongo/db/repl/do_txn_test.cpp
+++ b/src/mongo/db/repl/do_txn_test.cpp
@@ -218,18 +218,10 @@ BSONObj makeInsertOperation(const NamespaceString& nss,
const BSONObj& documentToInsert) {
return uuid ? BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert
- << "ui"
- << *uuid)
+ << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid)
: BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert);
+ << "ns" << nss.ns() << "o" << documentToInsert);
}
/**
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index 281b864d0d1..1e87ffa17e6 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -253,7 +253,7 @@ TEST_F(DropPendingCollectionReaperTest,
decltype(dpns) droppedNss;
bool writesAreReplicatedDuringDrop = true;
storageInterfaceMock.dropCollFn = [&droppedNss, &writesAreReplicatedDuringDrop](
- OperationContext* opCtx, const NamespaceString& nss) {
+ OperationContext* opCtx, const NamespaceString& nss) {
droppedNss = nss;
writesAreReplicatedDuringDrop = opCtx->writesAreReplicated();
return Status::OK();
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index a6c643b0ff0..0146f92a19d 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -661,12 +661,7 @@ template OplogEntry IdempotencyTest::update<const char*>(char const* _id, const
BSONObj makeInsertApplyOpsEntry(const NamespaceString& nss, const UUID& uuid, const BSONObj& doc) {
return BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc);
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc);
}
} // namespace repl
} // namespace mongo
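Most of the churn in this commit is the same transform seen in makeInsertApplyOpsEntry above: chained `operator<<` operands are now bin-packed up to the column limit instead of broken after every operand, so each BSON field name lands on the same line as its value. The sketch below shows the two layouts with a plain std::ostringstream stand-in (the packing rule is the same for any `<<` chain, and the output is identical either way):

    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
        std::string ns = "test.coll";
        int version = 2;

        // Old layout: one operand per line (the '-' side of these hunks).
        std::ostringstream before;
        before << "ns: "
               << ns
               << ", v: "
               << version;

        // New layout: operands packed up to the column limit, so each
        // name/value pair stays on one line (the '+' side).
        std::ostringstream after;
        after << "ns: " << ns << ", v: " << version;

        std::cout << (before.str() == after.str()) << '\n';  // prints 1: same output
        return 0;
    }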
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 09127b6c6ba..9376eccb3dd 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -483,8 +483,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
auto status = _checkForShutdownAndConvertStatus_inlock(
callbackArgs,
str::stream() << "error while starting initial sync attempt " << (initialSyncAttempt + 1)
- << " of "
- << initialSyncMaxAttempts);
+ << " of " << initialSyncMaxAttempts);
if (!status.isOK()) {
_finishInitialSyncAttempt(status);
return;
@@ -748,11 +747,8 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one document for the oldest active "
"transaction entry, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
@@ -859,11 +855,8 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one feature compatibility version "
"document, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
const auto hasDoc = docs.begin() != docs.end();
@@ -1528,8 +1521,8 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
}
Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn callback) {
- BSONObj query = BSON(
- "find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
+ BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
+ << "limit" << 1);
_lastOplogEntryFetcher =
stdx::make_unique<Fetcher>(_exec,
@@ -1680,13 +1673,12 @@ Status InitialSyncer::_scheduleWorkAtAndSaveHandle_inlock(
if (_isShuttingDown_inlock()) {
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to schedule work " << name << " at "
- << when.toString()
- << ": initial syncer is shutting down");
+ << when.toString() << ": initial syncer is shutting down");
}
auto result = _exec->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 9bc286759ff..f9b94d7193d 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -299,19 +299,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
-
- return std::move(localLoader);
- };
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
+
+ return std::move(localLoader);
+ };
_storageInterface->upgradeNonReplicatedUniqueIndexesFn = [this](OperationContext* opCtx) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
if (_storageInterfaceWorkDone.upgradeNonReplicatedUniqueIndexesShouldFail) {
@@ -372,17 +372,13 @@ protected:
dataReplicatorExternalState->lastCommittedOpTime = _myLastOpTime;
{
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "myset"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 10000))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "myset"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("electionTimeoutMillis" << 10000))));
dataReplicatorExternalState->replSetConfigResult = config;
}
_externalState = dataReplicatorExternalState.get();
@@ -1170,14 +1166,14 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingOpTimeSchedu
// We reject the 'find' command for the begin fetching optime and save the request for
// inspection at the end of this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (
- ("find" == elem.fieldNameStringData()) &&
- (NamespaceString::kSessionTransactionsTableNamespace.coll() == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ (NamespaceString::kSessionTransactionsTableNamespace.coll() ==
+ elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1260,12 +1256,13 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherSchedul
// We reject the 'find' command on the oplog and save the request for inspection at the end of
// this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (("find" == elem.fieldNameStringData()) && ("oplog.rs" == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ ("oplog.rs" == elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1680,8 +1677,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsUpgradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion42)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -1689,8 +1685,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsDowngradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion40)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -2241,8 +2236,7 @@ TEST_F(InitialSyncerTest,
<< "dbinfo")
<< BSON("name"
<< "b"))
- << "ok"
- << 1)));
+ << "ok" << 1)));
net->runReadyNetworkOperations();
// Oplog tailing query.
@@ -2609,8 +2603,7 @@ TEST_F(
// Second last oplog entry fetcher.
processSuccessfulLastOplogEntryFetcherResponse({BSON("ts"
<< "not a timestamp"
- << "t"
- << 1)});
+ << "t" << 1)});
// _lastOplogEntryFetcherCallbackAfterCloningData() will shut down the OplogFetcher after
// setting the completion status.
@@ -3226,8 +3219,7 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())})));
+ << "ns" << nss.ns())})));
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -3960,8 +3952,7 @@ TEST_F(InitialSyncerTest,
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -4344,8 +4335,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
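The initial_syncer_test.cpp hunks show the other big visual change: for a lambda assigned to a variable or member, the formatter now breaks after the `=` and indents the entire lambda, body included, one level under the assignment, instead of leaving the parameter list hanging off the capture brackets. A self-contained sketch of both layouts (the names are illustrative, not from the tree):

    #include <functional>
    #include <iostream>
    #include <string>

    int main() {
        // Pre-reformat layout: parameters hang off the capture list and the
        // body aligns with the enclosing statement.
        std::function<bool(const std::string&)> shouldFailOld = [](
            const std::string& name) {
            return name == "find";
        };

        // Post-reformat layout: break after '=', indent the whole lambda.
        std::function<bool(const std::string&)> shouldFailNew =
            [](const std::string& name) {
                return name == "find";
            };

        std::cout << shouldFailOld("find") << shouldFailNew("count") << '\n';  // 10
        return 0;
    }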
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index e160054208b..06e0d1c1896 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -220,8 +220,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Found \"" << kIsReplicaSetFieldName
<< "\" field which should indicate that no valid config "
"is loaded, but we didn't also have an \""
- << kInfoFieldName
- << "\" field as we expected");
+ << kInfoFieldName << "\" field as we expected");
}
}
@@ -248,8 +247,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kHostsFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(hostElement.type()));
}
_hosts.push_back(HostAndPort(hostElement.String()));
@@ -269,8 +267,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kPassivesFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(passiveElement.type()));
}
_passives.push_back(HostAndPort(passiveElement.String()));
@@ -290,8 +287,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kArbitersFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(arbiterElement.type()));
}
_arbiters.push_back(HostAndPort(arbiterElement.String()));
@@ -364,8 +360,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kTagsFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(tagsElement.type()));
}
_tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
@@ -397,8 +392,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastWriteOpTimeElement.type()));
}
auto lastWriteOpTime = OpTime::parseFromOplogEntry(lastWriteOpTimeElement.Obj());
@@ -418,8 +412,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastWriteDateElement.type()));
}
if (_lastWrite) {
@@ -439,8 +432,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastMajorityWriteOpTimeElement.type()));
}
auto lastMajorityWriteOpTime =
@@ -461,8 +453,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastMajorityWriteDateElement.type()));
}
if (_lastMajorityWrite) {
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index a78298933fd..3d62a2b60b4 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -273,8 +273,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
for (int tries = 0; tries < 3; ++tries) {
err = GetAdaptersAddresses(family,
GAA_FLAG_SKIP_ANYCAST | // only want unicast addrs
- GAA_FLAG_SKIP_MULTICAST |
- GAA_FLAG_SKIP_DNS_SERVER,
+ GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER,
NULL,
adapters,
&adaptersLen);
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 02fb978173e..5ab43763c8c 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -178,9 +178,9 @@ MemberConfig::MemberConfig(const BSONObj& mcfg, ReplSetTagConfig* tagConfig) {
for (auto&& tag : tagsElement.Obj()) {
if (tag.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
- str::stream() << "tags." << tag.fieldName()
- << " field has non-string value of type "
- << typeName(tag.type()));
+ str::stream()
+ << "tags." << tag.fieldName()
+ << " field has non-string value of type " << typeName(tag.type()));
}
_tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
}
@@ -240,9 +240,9 @@ Status MemberConfig::validate() const {
}
if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
return Status(ErrorCodes::BadValue,
- str::stream() << kSlaveDelayFieldName << " field value of "
- << durationCount<Seconds>(_slaveDelay)
- << " seconds is out of range");
+ str::stream()
+ << kSlaveDelayFieldName << " field value of "
+ << durationCount<Seconds>(_slaveDelay) << " seconds is out of range");
}
// Check for additional electable requirements, when priority is non zero
if (_priority != 0) {
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index 6176d230463..cf84b37ccdc 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -60,8 +60,7 @@ TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
ReplSetTagConfig tagConfig;
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "localhost"
- << "frim"
- << 1),
+ << "frim" << 1),
&tagConfig),
ExceptionFor<ErrorCodes::BadValue>);
}
@@ -133,8 +132,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << 1.0),
+ << "arbiterOnly" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isArbiter());
ASSERT_EQUALS(0.0, mc.getPriority());
@@ -142,8 +140,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << false),
+ << "arbiterOnly" << false),
&tagConfig);
ASSERT_TRUE(!mc.isArbiter());
ASSERT_EQUALS(1.0, mc.getPriority());
@@ -155,16 +152,14 @@ TEST(MemberConfig, ParseHidden) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << 1.0),
+ << "hidden" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isHidden());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << false),
+ << "hidden" << false),
&tagConfig);
ASSERT_TRUE(!mc.isHidden());
}
@@ -181,16 +176,14 @@ TEST(MemberConfig, ParseBuildIndexes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << 1.0),
+ << "buildIndexes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.shouldBuildIndexes());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << false),
+ << "buildIndexes" << false),
&tagConfig);
ASSERT_TRUE(!mc.shouldBuildIndexes());
}
@@ -201,18 +194,14 @@ TEST(MemberConfig, ParseVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
@@ -220,38 +209,33 @@ TEST(MemberConfig, ParseVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5),
+ << "votes" << 0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5),
+ << "votes" << -0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "votes" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -261,31 +245,27 @@ TEST(MemberConfig, ParsePriority) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1),
+ << "priority" << 1),
&tagConfig);
ASSERT_EQUALS(1.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_EQUALS(0.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 100.8),
+ << "priority" << 100.8),
&tagConfig);
ASSERT_EQUALS(100.8, mc.getPriority());
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "priority" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -294,8 +274,7 @@ TEST(MemberConfig, ParseSlaveDelay) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "slaveDelay"
- << 100),
+ << "slaveDelay" << 100),
&tagConfig);
ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
}
@@ -365,14 +344,13 @@ TEST(MemberConfig, DuplicateHorizonNames) {
ASSERT_NOT_EQUALS(s.reason().find("Duplicate horizon name found"), std::string::npos);
}
try {
- MemberConfig(BSON("_id" << 0 << "host"
- << "h"
- << "horizons"
- << BSON("someUniqueHorizonName"
- << "a.host:43"
- << SplitHorizon::kDefaultHorizon
- << "b.host:256")),
- &tagConfig);
+ MemberConfig(
+ BSON("_id" << 0 << "host"
+ << "h"
+ << "horizons"
+ << BSON("someUniqueHorizonName"
+ << "a.host:43" << SplitHorizon::kDefaultHorizon << "b.host:256")),
+ &tagConfig);
ASSERT_TRUE(false); // Should not succeed.
} catch (const ExceptionFor<ErrorCodes::BadValue>& ex) {
const Status& s = ex.toStatus();
@@ -489,8 +467,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -498,10 +475,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -510,8 +484,7 @@ TEST(MemberConfig, ValidateVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -519,10 +492,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5
- << "priority"
- << 0),
+ << "votes" << 0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -530,10 +500,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5
- << "priority"
- << 0),
+ << "votes" << -0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -542,16 +509,14 @@ TEST(MemberConfig, ValidateVotes) {
// Invalid values
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -1),
+ << "votes" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -562,32 +527,28 @@ TEST(MemberConfig, ValidatePriorityRanges) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1000),
+ << "priority" << 1000),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << -1),
+ << "priority" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1001),
+ << "priority" << 1001),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -598,40 +559,28 @@ TEST(MemberConfig, ValidateSlaveDelays) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 0),
+ << "priority" << 0 << "slaveDelay" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 10),
+ << "priority" << 0 << "slaveDelay" << 3600 * 10),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << -1),
+ << "priority" << 0 << "slaveDelay" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 24 * 400),
+ << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -641,10 +590,7 @@ TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "slaveDelay"
- << 60),
+ << "priority" << 1 << "slaveDelay" << 60),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -654,20 +600,14 @@ TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << true),
+ << "priority" << 1 << "hidden" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << false),
+ << "priority" << 1 << "hidden" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -678,10 +618,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << false),
+ << "priority" << 1 << "buildIndexes" << false),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
@@ -689,10 +626,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << true),
+ << "priority" << 1 << "buildIndexes" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -703,42 +637,28 @@ TEST(MemberConfig, ValidateArbiterVotesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << true),
+ << "votes" << 1 << "arbiterOnly" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0
- << "arbiterOnly"
- << false),
+ << "votes" << 0 << "priority" << 0 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << false),
+ << "votes" << 1 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "arbiterOnly"
- << true),
+ << "votes" << 0 << "arbiterOnly" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp
index 40a081ba6a2..247167bc150 100644
--- a/src/mongo/db/repl/member_data.cpp
+++ b/src/mongo/db/repl/member_data.cpp
@@ -141,8 +141,9 @@ void MemberData::setLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_
// TODO(russotto): We think this should never happen, rollback or no rollback. Make this an
// invariant and see what happens.
log() << "Durable progress (" << opTime.opTime << ") is ahead of the applied progress ("
- << _lastAppliedOpTime << ". This is likely due to a "
- "rollback."
+ << _lastAppliedOpTime
+ << ". This is likely due to a "
+ "rollback."
<< " memberid: " << _memberId << _hostAndPort.toString()
<< " previous durable progress: " << _lastDurableOpTime;
} else {
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.h b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
index 9bac2e16d74..7f52f4a3f21 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.h
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
@@ -39,7 +39,7 @@ class OperationContext;
namespace repl {
class OplogEntry;
class StorageInterfaceMock;
-}
+} // namespace repl
/**
* This is a basic fixture that is backed by an ephemeral storage engine and a mock replication
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index d9fc1390de3..35719c01913 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -409,7 +409,7 @@ OplogDocWriter _logOpWriter(OperationContext* opCtx,
return OplogDocWriter(OplogDocWriter(b.obj(), obj));
}
-} // end anon namespace
+} // namespace
/* we write to local.oplog.rs:
{ ts : ..., h: ..., v: ..., op: ..., etc }
@@ -429,8 +429,8 @@ OplogDocWriter _logOpWriter(OperationContext* opCtx,
* writers - an array with size nDocs of DocWriter objects.
* timestamps - an array with size nDocs of respective Timestamp objects for each DocWriter.
* oplogCollection - collection to be written to.
- * finalOpTime - the OpTime of the last DocWriter object.
- * wallTime - the wall clock time of the corresponding oplog entry.
+ * finalOpTime - the OpTime of the last DocWriter object.
+ * wallTime - the wall clock time of the corresponding oplog entry.
*/
void _logOpsInner(OperationContext* opCtx,
const NamespaceString& nss,
@@ -459,8 +459,7 @@ void _logOpsInner(OperationContext* opCtx,
// are logging within one WriteUnitOfWork.
invariant(finalOpTime.getTimestamp() <= *commitTime,
str::stream() << "Final OpTime: " << finalOpTime.toString()
- << ". Commit Time: "
- << commitTime->toString());
+ << ". Commit Time: " << commitTime->toString());
}
// Optionally hang before advancing lastApplied.
@@ -495,12 +494,8 @@ OpTime logOp(OperationContext* opCtx,
// All collections should have UUIDs now, so all insert, update, and delete oplog entries should
// also have uuids. Some no-op (n) and command (c) entries may still elide the uuid field.
invariant(uuid || 'n' == *opstr || 'c' == *opstr,
- str::stream() << "Expected uuid for logOp with opstr: " << opstr << ", nss: "
- << nss.ns()
- << ", obj: "
- << obj
- << ", os: "
- << o2);
+ str::stream() << "Expected uuid for logOp with opstr: " << opstr
+ << ", nss: " << nss.ns() << ", obj: " << obj << ", os: " << o2);
auto replCoord = ReplicationCoordinator::get(opCtx);
// For commands, the test below is on the command ns and therefore does not check for
@@ -629,7 +624,7 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx,
sleepmillis(numMillis);
}
- std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]);
+ std::unique_ptr<DocWriter const*[]> basePtrs(new DocWriter const*[count]);
for (size_t i = 0; i < count; i++) {
basePtrs[i] = &writers[i];
}
@@ -656,7 +651,7 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
LOG(3) << "32bit system; choosing " << sz << " bytes oplog";
return sz;
}
-// First choose a minimum size.
+ // First choose a minimum size.
#if defined(__APPLE__)
// typically these are desktops (dev machines), so keep it smallish
@@ -786,8 +781,7 @@ std::pair<OptionalCollectionUUID, NamespaceString> parseCollModUUIDAndNss(Operat
const auto nsByUUID = catalog.lookupNSSByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(cmd.toString()),
+ << "): " << redact(cmd.toString()),
nsByUUID);
return std::pair<OptionalCollectionUUID, NamespaceString>(uuid, *nsByUUID);
}
@@ -1342,8 +1336,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
collection = catalog.lookupCollectionByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(op.toString()),
+ << "): " << redact(op.toString()),
collection);
requestNss = collection->ns();
dassert(opCtx->lockState()->isCollectionLockedForMode(
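A smaller mechanical fix from the oplog.cpp hunk above: `DocWriter const* []` becomes `DocWriter const*[]`, with no space between the pointer declarator and the array brackets in the unique_ptr array form. A compilable sketch with a stand-in struct (the real DocWriter lives elsewhere in the storage layer):

    #include <cstddef>
    #include <memory>

    struct DocWriter {};  // stand-in for illustration only

    int main() {
        const std::size_t count = 4;

        // Array form of unique_ptr; newer clang-format writes the element
        // type as "DocWriter const*[]" with no space before the brackets.
        std::unique_ptr<DocWriter const*[]> basePtrs(new DocWriter const*[count]);

        DocWriter w;
        for (std::size_t i = 0; i < count; ++i) {
            basePtrs[i] = &w;  // each slot points at a writer, as in logInsertOps
        }
        return 0;
    }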
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 39c5ab03036..cfea973d17d 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -66,17 +66,16 @@ std::tuple<BSONObj, Timestamp, std::size_t> OplogBufferCollection::addIdToDocume
const BSONObj& orig, const Timestamp& lastTimestamp, std::size_t sentinelCount) {
if (orig.isEmpty()) {
return std::make_tuple(
- BSON(kIdFieldName << BSON(
- kTimestampFieldName << lastTimestamp << kSentinelFieldName
- << static_cast<long long>(sentinelCount + 1))),
+ BSON(kIdFieldName << BSON(kTimestampFieldName
+ << lastTimestamp << kSentinelFieldName
+ << static_cast<long long>(sentinelCount + 1))),
lastTimestamp,
sentinelCount + 1);
}
const auto ts = orig[kTimestampFieldName].timestamp();
invariant(!ts.isNull());
auto doc = BSON(kIdFieldName << BSON(kTimestampFieldName << ts << kSentinelFieldName << 0)
- << kOplogEntryFieldName
- << orig);
+ << kOplogEntryFieldName << orig);
return std::make_tuple(doc, ts, 0);
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index 0d544104fc6..64ee03f9d0f 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -111,12 +111,9 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") {
BSONObj makeOplogEntry(int t) {
return BSON("ts" << Timestamp(t, t) << "ns"
<< "a.a"
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "o"
- << BSON("_id" << t << "a" << t));
+ << "o" << BSON("_id" << t << "a" << t));
}
TEST_F(OplogBufferCollectionTest, DefaultNamespace) {
@@ -623,7 +620,9 @@ TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -666,7 +665,9 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -702,7 +703,9 @@ TEST_F(OplogBufferCollectionTest,
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(3), makeOplogEntry(4), makeOplogEntry(5),
+ makeOplogEntry(3),
+ makeOplogEntry(4),
+ makeOplogEntry(5),
};
ASSERT_BSONOBJ_EQ(*oplogBuffer.lastObjectPushed(_opCtx.get()), secondDoc);
@@ -929,7 +932,12 @@ void _testPushSentinelsProperly(
OplogBufferCollection oplogBuffer(storageInterface, nss);
oplogBuffer.startup(opCtx);
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
pushDocsFn(opCtx, &oplogBuffer, oplog);
@@ -1016,7 +1024,8 @@ DEATH_TEST_F(
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(2), makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(1),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -1028,7 +1037,10 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), BSONObj(), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ BSONObj(),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]);
@@ -1149,7 +1161,12 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend());
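The exploded vector literals in oplog_buffer_collection_test.cpp come from the trailing comma already present in those initializer lists: the newer formatter treats a trailing comma in a braced init list as a request for one element per line, while a list without one is packed up to the column limit. A self-contained sketch:

    #include <vector>

    int main() {
        // No trailing comma: elements are packed onto as few lines as fit.
        std::vector<int> packed = {1, 2, 3};

        // Trailing comma: one element per line, which is why the oplog
        // vectors above were broken apart by the reformat.
        std::vector<int> onePerLine = {
            1,
            2,
            3,
        };

        return packed.size() == onePerLine.size() ? 0 : 1;
    }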
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 6d549dd1965..38a71114cf1 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -81,8 +81,7 @@ OplogEntry::CommandType parseCommandType(const BSONObj& objectField) {
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unknown oplog entry command type: " << commandString
- << " Object field: "
- << redact(objectField));
+ << " Object field: " << redact(objectField));
}
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 9c4df1a1bec..3162319ab2b 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -161,11 +161,10 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
// sync source is now behind us, choose a new sync source to prevent going into rollback.
if (remoteLastOpApplied && (*remoteLastOpApplied < lastFetched)) {
return Status(ErrorCodes::InvalidSyncSource,
- str::stream() << "Sync source's last applied OpTime "
- << remoteLastOpApplied->toString()
- << " is older than our last fetched OpTime "
- << lastFetched.toString()
- << ". Choosing new sync source.");
+ str::stream()
+ << "Sync source's last applied OpTime " << remoteLastOpApplied->toString()
+ << " is older than our last fetched OpTime " << lastFetched.toString()
+ << ". Choosing new sync source.");
}
// If 'requireFresherSyncSource' is true, we must check that the sync source's
@@ -181,8 +180,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidSyncSource,
str::stream()
<< "Sync source must be ahead of me. My last fetched oplog optime: "
- << lastFetched.toString()
- << ", latest oplog optime of sync source: "
+ << lastFetched.toString() << ", latest oplog optime of sync source: "
<< remoteLastOpApplied->toString());
}
@@ -202,9 +200,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidBSON,
str::stream() << "our last optime fetched: " << lastFetched.toString()
<< ". failed to parse optime from first oplog on source: "
- << o.toString()
- << ": "
- << opTimeResult.getStatus().toString());
+ << o.toString() << ": " << opTimeResult.getStatus().toString());
}
auto opTime = opTimeResult.getValue();
if (opTime != lastFetched) {
@@ -289,15 +285,9 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (lastTS >= docTS) {
return Status(ErrorCodes::OplogOutOfOrder,
str::stream() << "Out of order entries in oplog. lastTS: "
- << lastTS.toString()
- << " outOfOrderTS:"
- << docTS.toString()
- << " in batch with "
- << info.networkDocumentCount
- << "docs; first-batch:"
- << first
- << ", doc:"
- << doc);
+ << lastTS.toString() << " outOfOrderTS:" << docTS.toString()
+ << " in batch with " << info.networkDocumentCount
+ << "docs; first-batch:" << first << ", doc:" << doc);
}
lastTS = docTS;
}
diff --git a/src/mongo/db/repl/oplog_interface_mock.cpp b/src/mongo/db/repl/oplog_interface_mock.cpp
index 6352fa7566a..95930bf6d62 100644
--- a/src/mongo/db/repl/oplog_interface_mock.cpp
+++ b/src/mongo/db/repl/oplog_interface_mock.cpp
@@ -90,8 +90,7 @@ public:
str::stream()
<< "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << _nextOpTime.toBSON()
- << " cannot be found");
+ << _nextOpTime.toBSON() << " cannot be found");
}
// We shouldn't get any other error.
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index b95f8d2d4fc..00f76f96c4d 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -124,9 +124,9 @@ TEST_F(OplogTest, LogOpReturnsOpTimeOnSuccessfulInsertIntoOplogCollection) {
<< "OpTime returned from logOp() did not match that in the oplog entry written to the "
"oplog: "
<< oplogEntry.toBSON();
- ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType()) << "Expected 'n' op type but found '"
- << OpType_serializer(oplogEntry.getOpType())
- << "' instead: " << oplogEntry.toBSON();
+ ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType())
+ << "Expected 'n' op type but found '" << OpType_serializer(oplogEntry.getOpType())
+ << "' instead: " << oplogEntry.toBSON();
ASSERT_BSONOBJ_EQ(msgObj, oplogEntry.getObject());
// Ensure that the msg optime returned is the same as the last optime in the ReplClientInfo.
diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp
index d1e2b0d7e49..9192738a31c 100644
--- a/src/mongo/db/repl/optime_extract_test.cpp
+++ b/src/mongo/db/repl/optime_extract_test.cpp
@@ -51,8 +51,7 @@ TEST(ExtractBSON, ExtractOpTimeField) {
// Missing timestamp field.
obj = BSON("a" << BSON("ts"
<< "notATimestamp"
- << "t"
- << 2));
+ << "t" << 2));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, bsonExtractOpTimeField(obj, "a", &opTime));
// Wrong typed timestamp field.
obj = BSON("a" << BSON("t" << 2));
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 89545a0fb4a..5ec5ae968c1 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -202,23 +202,20 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
} else {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized option in " << kReadConcernFieldName
- << ": "
- << fieldName);
+ << ": " << fieldName);
}
}
if (_afterClusterTime && _opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAfterOpTimeFieldName);
+ << " and " << kAfterOpTimeFieldName);
}
if (_afterClusterTime && _atClusterTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAtClusterTimeFieldName);
+ << " and " << kAtClusterTimeFieldName);
}
// Note: 'available' should not be used with after cluster time, as cluster time can wait for
@@ -228,30 +225,24 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
getLevel() != ReadConcernLevel::kLocalReadConcern &&
getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kMajorityReadConcernStr
- << ", "
- << kLocalReadConcernStr
- << ", or "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kMajorityReadConcernStr << ", "
+ << kLocalReadConcernStr << ", or " << kSnapshotReadConcernStr);
}
if (_opTime && getLevel() == ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterOpTimeFieldName << " field cannot be set if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterOpTimeFieldName << " field cannot be set if " << kLevelFieldName
+ << " is equal to " << kSnapshotReadConcernStr);
}
if (_atClusterTime && getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAtClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAtClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kSnapshotReadConcernStr);
}
if (_afterClusterTime && _afterClusterTime == LogicalTime::kUninitialized) {
@@ -294,8 +285,7 @@ Status ReadConcernArgs::upconvertReadConcernLevelToSnapshot() {
if (_opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Cannot upconvert the readConcern level to 'snapshot' when '"
- << kAfterOpTimeFieldName
- << "' is provided");
+ << kAfterOpTimeFieldName << "' is provided");
}
_originalLevel = _level;
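read_concern_args.cpp shows the last recurring shape: when a long message will not fit after `str::stream()`, the whole stream expression now moves to its own line with the `<<` chain indented beneath it, rather than aligning every operand under the first one. `str::stream` is MongoDB's string builder; the sketch below uses a minimal stand-in (illustrative only, not the real class) so it compiles on its own:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Minimal stand-in for mongo::str::stream(): collects operands and
    // converts to std::string on demand.
    struct Stream {
        std::ostringstream ss;
        template <typename T>
        Stream& operator<<(const T& v) {
            ss << v;
            return *this;
        }
        operator std::string() const {
            return ss.str();
        }
    };

    std::string makeError(const std::string& field, const std::string& level) {
        // New layout: the builder starts on its own line and the chain
        // indents under it, with related operands sharing lines.
        return Stream()
            << field << " field can be set only if level is equal to " << level;
    }

    int main() {
        std::cout << makeError("afterClusterTime", "snapshot") << '\n';
        return 0;
    }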
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index ed6ec48875c..d6907a31f26 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -39,13 +39,12 @@ namespace {
TEST(ReadAfterParse, OpTimeOnly) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT_TRUE(readConcern.getArgsOpTime());
ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
@@ -59,8 +58,7 @@ TEST(ReadAfterParse, AfterClusterTimeOnly) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
@@ -73,13 +71,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -91,13 +88,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -108,13 +104,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
TEST(ReadAfterParse, AfterClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -127,8 +122,7 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()))));
}
@@ -136,13 +130,12 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAtClusterTime = readConcern.getArgsAtClusterTime();
ASSERT_TRUE(argsAtClusterTime);
ASSERT_FALSE(readConcern.getArgsOpTime());
@@ -153,40 +146,37 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
TEST(ReadAfterParse, AtClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelAvailable) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "available"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "available"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
@@ -194,8 +184,7 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()
<< ReadConcernArgs::kLevelFieldName
@@ -206,8 +195,7 @@ TEST(ReadAfterParse, LevelMajorityOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -219,8 +207,7 @@ TEST(ReadAfterParse, LevelSnapshotOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -234,15 +221,12 @@ TEST(ReadAfterParse, ReadCommittedFullSpecification) {
auto afterClusterTime = LogicalTime(Timestamp(100, 200));
ASSERT_NOT_OK(readConcern.initialize(BSON(
"find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2)
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ << ReadConcernArgs::kAfterClusterTimeFieldName << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, Empty) {
@@ -257,58 +241,51 @@ TEST(ReadAfterParse, Empty) {
TEST(ReadAfterParse, BadRootType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ASSERT_NOT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName << "x")));
}
TEST(ReadAfterParse, BadAtClusterTimeType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << 2
- << ReadConcernArgs::kLevelFieldName
+ << 2 << ReadConcernArgs::kLevelFieldName
<< "snapshot"))));
}
TEST(ReadAfterParse, BadAtClusterTimeValue) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << LogicalTime::kUninitialized.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << LogicalTime::kUninitialized.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, BadOpTimeType) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSONObj())));
}
TEST(ReadAfterParse, NoOpTimeTS) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << 2)))));
}
@@ -316,40 +293,36 @@ TEST(ReadAfterParse, NoOpTimeTS) {
TEST(ReadAfterParse, NoOpTimeTerm) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(
- BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << BSON("x" << 1) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << BSON("x" << 1)
+ << OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTermType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(1, 0) << OpTime::kTermFieldName
- << "y")))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(1, 0)
+ << OpTime::kTermFieldName << "y")))));
}
TEST(ReadAfterParse, BadLevelType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
@@ -357,8 +330,7 @@ TEST(ReadAfterParse, BadLevelValue) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::FailedToParse,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "seven is not a real level"))));
}
@@ -367,39 +339,35 @@ TEST(ReadAfterParse, BadOption) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON("asdf" << 1))));
}
TEST(ReadAfterParse, AtClusterTimeAndAfterClusterTime) {
ReadConcernArgs readConcern;
auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kAfterClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, AfterOpTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30)
- << OpTime::kTermFieldName
- << 2)
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterSerialize, Empty) {
@@ -430,10 +398,10 @@ TEST(ReadAfterSerialize, AfterOpTimeOnly) {
ReadConcernArgs readConcern(OpTime(Timestamp(20, 30), 2), boost::none);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName << BSON(
- ReadConcernArgs::kAfterOpTimeFieldName << BSON(
- OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName << BSON(
+ OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -455,11 +423,10 @@ TEST(ReadAfterSerialize, iAfterCLusterTimeAndLevel) {
ReadConcernArgs readConcern(afterClusterTime, ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "majority"
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -470,13 +437,11 @@ TEST(ReadAfterSerialize, AfterOpTimeAndLevel) {
ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName
- << "majority"
- << ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName << BSON(
+ ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -486,8 +451,7 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -495,11 +459,10 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"
- << ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "snapshot" << ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -516,8 +479,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, EmptyLevel) {
TEST(UpconvertReadConcernLevelToSnapshot, LevelLocal) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "local"))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -530,8 +492,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelMajority) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT(ReadConcernLevel::kMajorityReadConcern == readConcern.getLevel());
@@ -544,8 +505,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshot) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
@@ -558,8 +518,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshotWithAtClusterTime) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -577,8 +536,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, AfterClusterTime) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -594,8 +552,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelAvailable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "available"))));
ASSERT(ReadConcernLevel::kAvailableReadConcern == readConcern.getLevel());
@@ -608,8 +565,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "linearizable"))));
ASSERT(ReadConcernLevel::kLinearizableReadConcern == readConcern.getLevel());
@@ -620,13 +576,12 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
TEST(UpconvertReadConcernLevelToSnapshot, AfterOpTime) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
ASSERT_TRUE(readConcern.getArgsOpTime());
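The hunks above all repack MongoDB's BSON stream-builder macro: BSON(a << b << c << d) produces the document {a: b, c: d}, and the updated clang-format configuration packs as many << operands onto a line as the column limit allows instead of breaking after every operand. A minimal sketch of the pattern being reformatted, using only names that appear in the hunks (ReadConcernArgs::initialize, the field-name constants, LogicalTime, Timestamp); the include paths and the sketch's function name are assumptions, not part of the commit:

    #include "mongo/bson/bsonobjbuilder.h"        // BSON, BSON_ARRAY, BSONObj
    #include "mongo/db/logical_time.h"            // LogicalTime (assumed path)
    #include "mongo/db/repl/read_concern_args.h"  // ReadConcernArgs (assumed path)
    #include "mongo/util/assert_util.h"           // invariant

    using namespace mongo;
    using namespace mongo::repl;

    // Builds {find: "test", readConcern: {level: "snapshot", atClusterTime: <ts>}}
    // and parses it back, mirroring the snapshot tests above.
    void parseSnapshotReadConcernSketch() {
        auto atClusterTime = LogicalTime(Timestamp(20, 30));
        BSONObj cmd = BSON("find"
                           << "test" << ReadConcernArgs::kReadConcernFieldName
                           << BSON(ReadConcernArgs::kLevelFieldName
                                   << "snapshot" << ReadConcernArgs::kAtClusterTimeFieldName
                                   << atClusterTime.asTimestamp()));
        ReadConcernArgs readConcern;
        invariant(readConcern.initialize(cmd).isOK());
        invariant(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
    }

Note the formatting rule visible throughout: a string literal immediately after << stays on its own line, while other operands are packed up to the column limit.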
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index 7614bd7ff7b..db794713203 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -138,17 +138,16 @@ Status ReplSetConfig::_initialize(const BSONObj& cfg, bool forInitiate, OID defa
if (memberElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of " << kMembersFieldName << "."
- << memberElement.fieldName()
- << " to be Object, but found "
+ << memberElement.fieldName() << " to be Object, but found "
<< typeName(memberElement.type()));
}
const auto& memberBSON = memberElement.Obj();
try {
_members.emplace_back(memberBSON, &_tagConfig);
} catch (const DBException& ex) {
- return Status(
- ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << ex.toStatus().toString() << " for member:" << memberBSON);
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << ex.toStatus().toString() << " for member:" << memberBSON);
}
}
@@ -348,43 +347,35 @@ Status ReplSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
if (_customWriteConcernModes.find(modeElement.fieldNameStringData()) !=
_customWriteConcernModes.end()) {
return Status(ErrorCodes::Error(51001),
- str::stream() << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
- << " contains multiple fields named "
- << modeElement.fieldName());
+ str::stream()
+ << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
+ << " contains multiple fields named " << modeElement.fieldName());
}
if (modeElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << " to be an Object, not "
- << typeName(modeElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << " to be an Object, not " << typeName(modeElement.type()));
}
ReplSetTagPattern pattern = _tagConfig.makePattern();
for (auto&& constraintElement : modeElement.Obj()) {
if (!constraintElement.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " to be a number, not "
- << typeName(constraintElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName() << " to be a number, not "
+ << typeName(constraintElement.type()));
}
const int minCount = constraintElement.numberInt();
if (minCount <= 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Value of " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " must be positive, but found "
- << minCount);
+ str::stream()
+ << "Value of " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName()
+ << " must be positive, but found " << minCount);
}
status = _tagConfig.addTagCountConstraintToPattern(
&pattern, constraintElement.fieldNameStringData(), minCount);
@@ -420,8 +411,7 @@ Status ReplSetConfig::validate() const {
if (_replSetName.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Replica set configuration must have non-empty "
- << kIdFieldName
- << " field");
+ << kIdFieldName << " field");
}
if (_heartbeatInterval < Milliseconds(0)) {
return Status(ErrorCodes::BadValue,
@@ -506,41 +496,22 @@ Status ReplSetConfig::validate() const {
const MemberConfig& memberJ = _members[j];
if (memberI.getId() == memberJ.getId()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kIdFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << memberI.getId());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kIdFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kIdFieldName << " == " << memberI.getId());
}
if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kHostFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << memberI.getHostAndPort().toString());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kHostFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kHostFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kHostFieldName
+ << " == " << memberI.getHostAndPort().toString());
}
}
}
@@ -593,9 +564,7 @@ Status ReplSetConfig::validate() const {
str::stream()
<< "Either all host names in a replica set configuration must be localhost "
"references, or none must be; found "
- << localhostCount
- << " out of "
- << _members.size());
+ << localhostCount << " out of " << _members.size());
}
if (voterCount > kMaxVotingMembers || voterCount == 0) {
@@ -636,9 +605,9 @@ Status ReplSetConfig::validate() const {
}
if (_protocolVersion != 1) {
return Status(ErrorCodes::BadValue,
- str::stream() << kProtocolVersionFieldName
- << " of 1 is the only supported value. Found: "
- << _protocolVersion);
+ str::stream()
+ << kProtocolVersionFieldName
+ << " of 1 is the only supported value. Found: " << _protocolVersion);
}
if (_configServer) {
@@ -708,8 +677,7 @@ Status ReplSetConfig::checkIfWriteConcernCanBeSatisfied(
// write concern mode.
return Status(ErrorCodes::UnsatisfiableWriteConcern,
str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode
- << "\"");
+ << writeConcern.wMode << "\"");
} else {
int nodesRemaining = writeConcern.wNumNodes;
for (size_t j = 0; j < _members.size(); ++j) {
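The same packing applies to str::stream() message construction throughout this file: the builder temporary now starts on its own line and the appended operands fill each line to the column limit. A condensed sketch of the idiom, lifted from the protocolVersion check above; the function name is hypothetical, the include paths are assumed for a tree of this vintage, and the field-name constant is replaced by a string literal to keep the sketch self-contained:

    #include "mongo/base/status.h"  // Status, ErrorCodes
    #include "mongo/util/str.h"     // str::stream (assumed path)

    using namespace mongo;

    // One Status whose message is assembled by chaining << onto a
    // str::stream temporary, as in ReplSetConfig::validate() above.
    Status checkProtocolVersionSketch(long long protocolVersion) {
        if (protocolVersion != 1) {
            return Status(ErrorCodes::BadValue,
                          str::stream()
                              << "protocolVersion"
                              << " of 1 is the only supported value. Found: " << protocolVersion);
        }
        return Status::OK();
    }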
diff --git a/src/mongo/db/repl/repl_set_config_checks.cpp b/src/mongo/db/repl/repl_set_config_checks.cpp
index 14cc8e99e61..5c0eeecdb97 100644
--- a/src/mongo/db/repl/repl_set_config_checks.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks.cpp
@@ -63,10 +63,8 @@ StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalSt
if (meConfigs.empty()) {
return StatusWith<int>(ErrorCodes::NodeNotFound,
str::stream() << "No host described in new configuration "
- << newConfig.getConfigVersion()
- << " for replica set "
- << newConfig.getReplSetName()
- << " maps to this node");
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName() << " maps to this node");
}
if (meConfigs.size() > 1) {
str::stream message;
@@ -95,11 +93,9 @@ Status checkElectable(const ReplSetConfig& newConfig, int configIndex) {
if (!myConfig.isElectable()) {
return Status(ErrorCodes::NodeNotElectable,
str::stream() << "This node, " << myConfig.getHostAndPort().toString()
- << ", with _id "
- << myConfig.getId()
+ << ", with _id " << myConfig.getId()
<< " is not electable under the new configuration version "
- << newConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
return Status::OK();
@@ -133,8 +129,7 @@ Status validateArbiterPriorities(const ReplSetConfig& config) {
if (iter->isArbiter() && iter->getPriority() != 0) {
return Status(ErrorCodes::InvalidReplicaSetConfig,
str::stream() << "Member " << iter->getHostAndPort().toString()
- << " is an arbiter but has priority "
- << iter->getPriority()
+ << " is an arbiter but has priority " << iter->getPriority()
<< ". Arbiter priority must be 0.");
}
}
@@ -164,10 +159,8 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream()
<< "New replica set configuration version must be greater than old, but "
- << newConfig.getConfigVersion()
- << " is not greater than "
- << oldConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " is not greater than "
+ << oldConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
@@ -175,8 +168,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set name; "
"old was "
- << oldConfig.getReplSetName()
- << ", and new is "
+ << oldConfig.getReplSetName() << ", and new is "
<< newConfig.getReplSetName());
}
@@ -184,8 +176,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set ID; "
"old was "
- << oldConfig.getReplicaSetId()
- << ", and new is "
+ << oldConfig.getReplicaSetId() << ", and new is "
<< newConfig.getReplicaSetId());
}
@@ -216,18 +207,14 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
}
if (hostsEqual && !idsEqual) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() << "New and old configurations both have members with "
- << MemberConfig::kHostFieldName
- << " of "
- << mOld->getHostAndPort().toString()
- << " but in the new configuration the "
- << MemberConfig::kIdFieldName
- << " field is "
- << mNew->getId()
- << " and in the old configuration it is "
- << mOld->getId()
- << " for replica set "
- << newConfig.getReplSetName());
+ str::stream()
+ << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName << " field is " << mNew->getId()
+ << " and in the old configuration it is " << mOld->getId()
+ << " for replica set " << newConfig.getReplSetName());
}
// At this point, the _id and host fields are equal, so we're looking at the old and
// new configurations for the same member node.
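Taken together, the checks in repl_set_config_checks.cpp require a reconfig to strictly increase the configuration version while keeping the set name, the replica set ID, and each member's host/_id pairing unchanged. A condensed, standalone sketch of the first rule, using only the accessors and error code that appear in the hunks above (the sketch's function name is hypothetical):

    // Version must strictly increase across a reconfig.
    Status checkVersionIncreasesSketch(const ReplSetConfig& oldConfig,
                                       const ReplSetConfig& newConfig) {
        if (oldConfig.getConfigVersion() >= newConfig.getConfigVersion()) {
            return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
                          str::stream()
                              << "New replica set configuration version must be greater than old, but "
                              << newConfig.getConfigVersion() << " is not greater than "
                              << oldConfig.getConfigVersion() << " for replica set "
                              << newConfig.getReplSetName());
        }
        return Status::OK();
    }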
diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp
index b8579f1f6e2..c887e11f69b 100644
--- a/src/mongo/db/repl/repl_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp
@@ -49,34 +49,28 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_VersionMustBe1) {
rses.addSelf(HostAndPort("h1"));
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")))));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
validateConfigForInitiate(&rses, config, getGlobalServiceContext()).getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2")
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock notPresentExternalState;
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -99,21 +93,17 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
TEST_F(ServiceContextTest, ValidateConfigForInitiate_SelfMustBeElectable) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -128,11 +118,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_WriteConcernMustBeSatisfiab
ASSERT_OK(
config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))
<< "settings"
@@ -152,55 +138,37 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOr
ReplSetConfig twoConfig;
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -228,11 +196,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
// Two configurations, identical except for version.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -242,11 +206,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -296,11 +256,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
// Two configurations, compatible except for set name.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -310,11 +266,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs1"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -345,35 +297,25 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetId
// Two configurations, compatible except for set ID.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -402,57 +344,40 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipBuildIn
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << true
- << "priority"
- << 0)
+ << "buildIndexes" << true
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << false
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -484,51 +409,37 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipArbiter
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << false)
+ << "arbiterOnly" << false)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "arbiterOnly"
- << false)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -562,11 +473,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -582,10 +489,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(
legalNewConfigWithNewHostAndId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -607,11 +511,8 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion"
+ << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -638,10 +539,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -662,11 +560,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -677,11 +571,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -738,69 +628,46 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_ArbiterPriorityValueMustBeZ
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
@@ -831,11 +698,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -846,17 +709,12 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -880,10 +738,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
@@ -905,22 +760,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigInvalid) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -948,22 +795,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigWriteConcernNotSat
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -991,11 +830,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1015,15 +850,10 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "h3")))));
@@ -1041,11 +871,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigWriteConcernNotSati
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -1065,11 +891,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigInvalid)
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1089,11 +911,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1112,11 +930,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigWriteConc
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1137,11 +951,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1151,11 +961,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1176,11 +982,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1190,11 +992,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h3")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp
index c795d711aa3..88d36b1b174 100644
--- a/src/mongo/db/repl/repl_set_config_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_test.cpp
@@ -63,11 +63,7 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -92,32 +88,24 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1234
- << "members"
- << BSON_ARRAY(BSON("_id" << 234 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("NYC"
- << "NY")))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("eastCoast" << BSON("NYC" << 1))
- << "chainingAllowed"
- << false
- << "heartbeatIntervalMillis"
- << 5000
- << "heartbeatTimeoutSecs"
- << 120
- << "electionTimeoutMillis"
- << 10))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1234 << "members"
+ << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "protocolVersion" << 1 << "settings"
+ << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed"
+ << false << "heartbeatIntervalMillis" << 5000
+ << "heartbeatTimeoutSecs" << 120 << "electionTimeoutMillis"
+ << 10))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("rs0", config.getReplSetName());
ASSERT_EQUALS(1234, config.getConfigVersion());
@@ -139,27 +127,20 @@ TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:11111")
- << BSON("_id" << 1 << "host"
- << "localhost:22222"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 2 << "host"
- << "localhost:33333"
- << "hidden"
- << true
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "localhost:44444")))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:11111")
+ << BSON("_id" << 1 << "host"
+ << "localhost:22222"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "localhost:33333"
+ << "hidden" << true << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "localhost:44444")))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(ConnectionString::forReplicaSet(
"rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}})
@@ -169,31 +150,22 @@ TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 5 << "host"
- << "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0 << "priority" << 0)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
@@ -201,37 +173,25 @@ TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
TEST(ReplSetConfig, MajorityCalculationNearlyHalfArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3
- << "arbiterOnly"
- << true)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "arbiterOnly" << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
@@ -240,68 +200,45 @@ TEST(ReplSetConfig, MajorityCalculationEvenNumberOfMembers) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)))));
+ << "_id" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
TEST(ReplSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(
+ BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
}
@@ -317,18 +254,14 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingIdField) {
// Replica set name must be present.
ASSERT_EQUALS(
ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ config.initialize(BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
// Empty repl set name parses, but does not validate.
ASSERT_OK(config.initialize(BSON("_id"
<< ""
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
@@ -341,9 +274,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey,
config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
@@ -351,29 +282,19 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< "1"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 0.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 0.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
@@ -381,9 +302,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< static_cast<long long>(std::numeric_limits<int>::max()) + 1
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
@@ -394,21 +313,13 @@ TEST(ReplSetConfig, ParseFailsWithBadMembers) {
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< "localhost:23456"))));
ASSERT_NOT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "localhost:12345")))));
}
@@ -417,11 +328,7 @@ TEST(ReplSetConfig, ParseFailsWithLocalNonLocalHostMix) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost")
<< BSON("_id" << 1 << "host"
@@ -433,15 +340,11 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ReplSetConfig config;
const BSONObj configBsonNoElectableNodes = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -450,57 +353,41 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ASSERT_OK(config.initialize(configBsonNoElectableNodes));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesOneArbiter = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "priority"
- << 0)));
+ const BSONObj configBsonNoElectableNodesOneArbiter =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesTwoArbiters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "arbiterOnly"
- << 1)));
+ const BSONObj configBsonNoElectableNodesTwoArbiters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly" << 1)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneElectableNode = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -511,46 +398,30 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
TEST(ReplSetConfig, ParseFailsWithTooFewVoters) {
ReplSetConfig config;
- const BSONObj configBsonNoVoters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "votes"
- << 0
- << "priority"
- << 0)));
+ const BSONObj configBsonNoVoters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes" << 0 << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoVoters));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneVoter = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "votes"
- << 0
- << "priority"
+ << "votes" << 0 << "priority"
<< 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
- << "votes"
- << 1)));
+ << "votes" << 1)));
ASSERT_OK(config.initialize(configBsonOneVoter));
ASSERT_OK(config.validate());
}
@@ -567,11 +438,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateHost) {
ReplSetConfig config;
const BSONObj configBson = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1")
<< BSON("_id" << 1 << "host"
@@ -621,14 +488,11 @@ TEST(ReplSetConfig, ParseFailsWithTooManyNodes) {
TEST(ReplSetConfig, ParseFailsWithUnexpectedField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "unexpectedfield"
- << "value"));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "unexpectedfield"
+ << "value"));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -636,11 +500,7 @@ TEST(ReplSetConfig, ParseFailsWithNonArrayMembersField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< "value"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -649,11 +509,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatIntervalMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -671,11 +527,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericElectionTimeoutMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -688,11 +540,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -705,11 +553,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -722,11 +566,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolConfigServerField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "configsvr"
@@ -738,11 +578,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectSettingsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -754,11 +590,7 @@ TEST(ReplSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -771,11 +603,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -788,11 +616,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -805,11 +629,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -824,20 +644,16 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("tag"
- << "yes")))
- << "settings"
- << BSON("getLastErrorModes" << BSON("one" << 1))));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings" << BSON("getLastErrorModes" << BSON("one" << 1))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -846,11 +662,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -867,11 +679,7 @@ TEST(ReplSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -887,11 +695,7 @@ TEST(ReplSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -906,13 +710,8 @@ TEST(ReplSetConfig, ParseFailsWithRepairField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "repaired"
- << true
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "repaired" << true << "version" << 1
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))));
ASSERT_EQUALS(ErrorCodes::RepairedReplicaSetNode, status);
@@ -922,11 +721,7 @@ TEST(ReplSetConfig, ValidateFailsWithBadProtocolVersion) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 3
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 3 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -941,11 +736,7 @@ TEST(ReplSetConfig, ValidateFailsWithProtocolVersion0) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 0
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 0 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -960,11 +751,7 @@ TEST(ReplSetConfig, ValidateFailsWithDuplicateMemberId) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 0 << "host"
@@ -979,15 +766,10 @@ TEST(ReplSetConfig, ValidateFailsWithInvalidMember) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "hidden"
- << true))));
+ << "hidden" << true))));
ASSERT_OK(status);
status = config.validate();
@@ -998,29 +780,19 @@ TEST(ReplSetConfig, ChainingAllowedField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << true))));
+ << "settings" << BSON("chainingAllowed" << true))));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << false))));
+ << "settings" << BSON("chainingAllowed" << false))));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.isChainingAllowed());
}
@@ -1029,13 +801,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_TRUE(config.isConfigServer());
@@ -1043,13 +810,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config2;
ASSERT_OK(config2.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "configsvr"
- << false
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "configsvr"
+ << false << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
@@ -1072,25 +834,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config.isConfigServer());
ReplSetConfig config2;
- ASSERT_OK(config2.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config2.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
@@ -1099,25 +854,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config3;
ASSERT_OK(config3.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config3.isConfigServer());
ReplSetConfig config4;
- ASSERT_OK(config4.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config4.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config4.isConfigServer());
}
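
The defaults test above captures the role-dependent behavior: plain initialize() never infers configsvr, while initializeForInitiate() defaults it to true once serverGlobalParams.clusterRole is ClusterRole::ConfigServer. Condensed to its essentials (a sketch of what the assertions establish, not a separate API guarantee):

    serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
    ReplSetConfig c;
    ASSERT_OK(c.initializeForInitiate(BSON("_id"
                                           << "rs0"
                                           << "protocolVersion" << 1 << "version" << 1 << "members"
                                           << BSON_ARRAY(BSON("_id" << 0 << "host"
                                                                    << "localhost:12345")))));
    ASSERT_TRUE(c.isConfigServer());  // Defaulted from the cluster role, not the document.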
@@ -1125,29 +873,19 @@ TEST(ReplSetConfig, HeartbeatIntervalField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(5), config.getHeartbeatInterval());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << -5000))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -1155,29 +893,19 @@ TEST(ReplSetConfig, ElectionTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 20))));
+ << "settings" << BSON("electionTimeoutMillis" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(20), config.getElectionTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << -20)));
+ << "settings" << BSON("electionTimeoutMillis" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "election timeout must be greater than 0");
}
@@ -1186,29 +914,19 @@ TEST(ReplSetConfig, HeartbeatTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 20))));
+ << "settings" << BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << -20)));
+ << "settings" << BSON("heartbeatTimeoutSecs" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "heartbeat timeout must be greater than 0");
}
@@ -1217,11 +935,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1232,11 +946,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1244,27 +954,19 @@ TEST(ReplSetConfig, GleDefaultField) {
<< "frim")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
ASSERT_OK(
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -1352,19 +1054,15 @@ bool operator==(const ReplSetConfig& a, const ReplSetConfig& b) {
TEST(ReplSetConfig, toBSONRoundTripAbility) {
ReplSetConfig configA;
ReplSetConfig configB;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_TRUE(configA == configB);
}
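
The round-trip tests in this stretch all share one skeleton: parse a config, serialize it with toBSON(), re-parse the output, and require equality under the operator== defined earlier in the file. Reduced to that skeleton (configObj stands for any of the literals in these tests):

    ReplSetConfig a, b;
    ASSERT_OK(a.initialize(configObj));   // Parse the hand-written document.
    ASSERT_OK(b.initialize(a.toBSON()));  // Re-parse the serialized form.
    ASSERT_TRUE(a == b);                  // Serialization must lose nothing.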
@@ -1372,132 +1070,83 @@ TEST(ReplSetConfig, toBSONRoundTripAbility) {
TEST(ReplSetConfig, toBSONRoundTripAbilityWithHorizon) {
ReplSetConfig configA;
ReplSetConfig configB;
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "horizons"
+ << BSON("horizon"
+ << "example.com:42")))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+ ReplSetConfig configA;
+ ReplSetConfig configB;
ASSERT_OK(configA.initialize(BSON(
"_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "asdf"
+ << "version" << 9 << "writeConcernMajorityJournalDefault" << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "horizons"
- << BSON("horizon"
- << "example.com:42")))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
+ << "arbiterOnly" << true << "votes" << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
+ << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
+ << 0 << "tags"
+ << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "votes" << 0 << "priority" << 0 << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true")))
+ << "protocolVersion" << 1 << "settings"
+
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 4 << "chainingAllowd"
+ << true << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
+ << "coasts" << BSON("coast" << 2))))));
+ BSONObj configObjA = configA.toBSON();
+ ASSERT_OK(configB.initialize(configObjA));
ASSERT_TRUE(configA == configB);
}
-TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
ReplSetConfig configA;
ReplSetConfig configB;
ASSERT_OK(configA.initialize(
BSON("_id"
- << "asdf"
- << "version"
- << 9
- << "writeConcernMajorityJournalDefault"
- << true
- << "members"
+ << ""
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 1)
- << BSON("_id" << 3 << "host"
+ << "arbiterOnly" << true << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 0 << "host"
<< "localhost:3828"
- << "arbiterOnly"
- << false
- << "hidden"
- << true
- << "buildIndexes"
- << false
- << "priority"
- << 0
- << "slaveDelay"
- << 17
- << "votes"
- << 0
- << "tags"
- << BSON("coast"
- << "east"
- << "ssd"
- << "true"))
+ << "arbiterOnly" << false << "buildIndexes" << false
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
- << "foo.com:3828"
- << "votes"
- << 0
- << "priority"
- << 0
- << "tags"
- << BSON("coast"
- << "west"
- << "hdd"
- << "true")))
- << "protocolVersion"
- << 1
+ << "localhost:3828"
+ << "votes" << 0 << "priority" << 0))
<< "settings"
-
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
- << "electionTimeoutMillis"
- << 4
- << "chainingAllowd"
- << true
- << "getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
- << "coasts"
- << BSON("coast" << 2))))));
- BSONObj configObjA = configA.toBSON();
- ASSERT_OK(configB.initialize(configObjA));
- ASSERT_TRUE(configA == configB);
-}
-
-TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
- ReplSetConfig configA;
- ReplSetConfig configB;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << ""
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 0 << "host"
- << "localhost:3828"
- << "arbiterOnly"
- << false
- << "buildIndexes"
- << false
- << "priority"
- << 2)
- << BSON("_id" << 2 << "host"
- << "localhost:3828"
- << "votes"
- << 0
- << "priority"
- << 0))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs"
- << 20
- << "electionTimeoutMillis"
- << 2))));
+ << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 2))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_NOT_OK(configA.validate());
ASSERT_NOT_OK(configB.validate());
@@ -1506,59 +1155,52 @@ TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
TEST(ReplSetConfig, CheckIfWriteConcernCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
WriteConcernOptions validNumberWC;
validNumberWC.wNumNodes = 5;
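
The getLastErrorModes document above defines named write-concern modes in terms of tag counts: mode "valid" requires acknowledgment covering 2 distinct dc values and 3 distinct rack values, which the five tagged data-bearing nodes can satisfy. A sketch of checking a named mode, assuming the method named in the test title keeps its obvious signature:

    WriteConcernOptions modeWC;
    modeWC.wMode = "valid";
    ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(modeWC));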
@@ -1619,19 +1261,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveArbiters) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly"
- << true)))));
+ << "arbiterOnly" << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "Arbiters are not allowed");
@@ -1641,21 +1277,14 @@ TEST(ReplSetConfig, CheckConfigServerMustBuildIndexes) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "buildIndexes"
- << false)))));
+ << "priority" << 0
+ << "buildIndexes" << false)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "must build indexes");
@@ -1665,20 +1294,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveSlaveDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "slaveDelay"
+ << "priority" << 0 << "slaveDelay"
<< 3)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
@@ -1691,19 +1313,13 @@ TEST(ReplSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJournalD
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), " must be true in replica set configurations being ");
@@ -1713,33 +1329,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 4)
+ << "priority" << 4)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 5))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 5))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configA.validate());
ASSERT_EQUALS(Milliseconds(5000), configA.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(4000), configA.getPriorityTakeoverDelay(1));
@@ -1750,33 +1356,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configB;
ASSERT_OK(configB.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 3))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 3))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configB.validate());
ASSERT_EQUALS(Milliseconds(5000), configB.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(3000), configB.getPriorityTakeoverDelay(1));
@@ -1789,29 +1385,20 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelay) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << 5000))));
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(5000), config.getCatchUpTakeoverDelay());
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << -5000)));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << -5000)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(
status.reason(),
@@ -1822,23 +1409,16 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelayDefault) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)))));
+ << "priority" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(30000), config.getCatchUpTakeoverDelay());
}
@@ -1849,11 +1429,7 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// PV1, should default to true.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1863,15 +1439,10 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// Should be able to set it false in PV1.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1881,11 +1452,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "horizons"
@@ -1914,8 +1481,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "delta"
<< "c.host3:44")))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = config.validate();
ASSERT_NOT_OK(status);
@@ -1929,11 +1495,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
// in the member-config code path.
status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "same1"
<< "horizons"
@@ -1978,8 +1540,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "d.host3:44"
<< "delta"
<< "d.host4:44")))
- << "writeConcernMajorityJournalDefault"
- << false));
+ << "writeConcernMajorityJournalDefault" << false));
ASSERT_OK(status) << " failing status was: " << status.reason();
status = config.validate();
@@ -2003,15 +1564,11 @@ TEST(ReplSetConfig, ReplSetId) {
auto status =
ReplSetConfig().initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
+ << "priority" << 1))
<< "settings"
<< BSON("replicaSetId" << OID::gen())));
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
@@ -2025,15 +1582,11 @@ TEST(ReplSetConfig, ReplSetId) {
ASSERT_OK(
configInitiate.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)))));
+ << "priority" << 1)))));
ASSERT_OK(configInitiate.validate());
ASSERT_TRUE(configInitiate.hasReplicaSetId());
OID replicaSetId = configInitiate.getReplicaSetId();
@@ -2042,17 +1595,11 @@ TEST(ReplSetConfig, ReplSetId) {
ReplSetConfig configLocal;
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << replicaSetId))));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << replicaSetId))));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
ASSERT_EQUALS(replicaSetId, configLocal.getReplicaSetId());
@@ -2061,15 +1608,10 @@ TEST(ReplSetConfig, ReplSetId) {
OID defaultReplicaSetId = OID::gen();
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))),
+ << "priority" << 1))),
defaultReplicaSetId));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
@@ -2078,34 +1620,22 @@ TEST(ReplSetConfig, ReplSetId) {
// 'replicaSetId' field cannot be null.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << OID())));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << OID())));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "replicaSetId field value cannot be null");
// 'replicaSetId' field must be an OID.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << 12345)));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << 12345)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_STRING_CONTAINS(status.reason(),
"\"replicaSetId\" had the wrong type. Expected objectId, found int");
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
index 3b79768db8d..a7cc785995e 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
@@ -78,10 +78,9 @@ Status ReplSetHeartbeatArgsV1::initialize(const BSONObj& argsObj) {
if (status.isOK()) {
if (tempHeartbeatVersion != 1) {
return Status(ErrorCodes::Error(40666),
- str::stream() << "Found invalid value for field "
- << kHeartbeatVersionFieldName
- << ": "
- << tempHeartbeatVersion);
+ str::stream()
+ << "Found invalid value for field " << kHeartbeatVersionFieldName
+ << ": " << tempHeartbeatVersion);
}
_heartbeatVersion = tempHeartbeatVersion;
_hasHeartbeatVersion = true;
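
The only change in this hunk is where the str::stream() expression begins; starting it on its own line gives every << continuation a single indent column. The idiom itself, as a freestanding sketch (the mongo/util/str.h header name is an assumption for this era of the codebase):

    #include "mongo/base/status.h"
    #include "mongo/util/str.h"

    using namespace mongo;

    // Builds a Status whose reason string is assembled via str::stream().
    Status invalidHeartbeatVersion(long long v) {
        return Status(ErrorCodes::Error(40666),
                      str::stream()
                          << "Found invalid value for field heartbeatVersion: " << v);
    }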
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index 4b16c88e389..5c43a35c71b 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -193,18 +193,18 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (memberStateElement.eoo()) {
_stateSet = false;
} else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"" << kMemberStateFieldName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Expected \"" << kMemberStateFieldName
<< "\" field in response to replSetHeartbeat "
"command to have type NumberInt or NumberLong, but found type "
<< typeName(memberStateElement.type()));
} else {
long long stateInt = memberStateElement.numberLong();
if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Value for \"" << kMemberStateFieldName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value for \"" << kMemberStateFieldName
<< "\" in response to replSetHeartbeat is "
"out of range; legal values are non-negative and no more than "
<< MemberState::RS_MAX);
@@ -217,8 +217,7 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (configVersionElement.eoo()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "Response to replSetHeartbeat missing required \""
- << kConfigVersionFieldName
- << "\" field");
+ << kConfigVersionFieldName << "\" field");
}
if (configVersionElement.type() != NumberInt) {
return Status(ErrorCodes::TypeMismatch,
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index f3f0f1ce8bb..352456c929d 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -166,19 +166,16 @@ TEST(ReplSetHeartbeatResponse, InitializeNoDurableWallTime) {
TEST(ReplSetHeartbeatResponse, InitializeWrongAppliedOpTimeType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << "hello");
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS("\"opTime\" had the wrong type. Expected object, found string", result.reason());
initializerObj = BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
<< OpTime().getTimestamp());
result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -190,9 +187,7 @@ TEST(ReplSetHeartbeatResponse, InitializeNoAppliedWallTime) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON());
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON());
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result);
ASSERT_EQUALS("Missing expected field \"wallTime\"", result.reason());
@@ -202,12 +197,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -221,13 +212,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooLow) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << -1);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << -1);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -240,13 +226,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooHigh) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << 11);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << 11);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -259,12 +240,8 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "v"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -276,17 +253,12 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "set"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "set" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -297,17 +269,12 @@ TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "syncingTo"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "syncingTo" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -318,17 +285,12 @@ TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -339,17 +301,12 @@ TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << BSON("illegalFieldName" << 2));
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << BSON("illegalFieldName" << 2));
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
@@ -371,12 +328,9 @@ TEST(ReplSetHeartbeatResponse, InvalidResponseOpTimeMissesConfigVersion) {
ReplSetHeartbeatResponse hbResp;
Status result = hbResp.initialize(BSON("ok" << 1.0 << "durableOpTime"
<< OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)),
+ << "durableWallTime" << Date_t() + Seconds(100)
+ << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100)),
0,
/*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.code());
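
Every heartbeat-response test above drives the same entry point: build the response document in the packed BSON style, then call initialize() with a term of 0 and requireWallTime set. The shared skeleton (initializerObj stands for each test's document; the expected error code varies per test):

    ReplSetHeartbeatResponse hbResponse;
    Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
    ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);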
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index f2127b70518..a57b7e35ceb 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -100,8 +100,7 @@ void ReplicationConsistencyMarkersImpl::initializeMinValidDocument(OperationCont
// will always be greater than the provided ones.
TimestampedBSONObj upsert;
upsert.obj = BSON("$max" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << Timestamp()
- << MinValidDocument::kMinValidTermFieldName
+ << Timestamp() << MinValidDocument::kMinValidTermFieldName
<< OpTime::kUninitializedTerm));
// The initialization write should go into the first checkpoint taken, so we provide no
@@ -153,10 +152,8 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
update.obj = BSON("$unset" << kInitialSyncFlag << "$set"
<< BSON(MinValidDocument::kMinValidTimestampFieldName
<< time.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << time.getTerm()
- << MinValidDocument::kAppliedThroughFieldName
- << time));
+ << MinValidDocument::kMinValidTermFieldName << time.getTerm()
+ << MinValidDocument::kAppliedThroughFieldName << time));
// We clear the initial sync flag at the 'lastAppliedOpTime'. This is unnecessary, since there
    // should not be any stable checkpoints being taken that this write could inadvertently enter.
@@ -194,10 +191,10 @@ void ReplicationConsistencyMarkersImpl::setMinValid(OperationContext* opCtx,
LOG(3) << "setting minvalid to exactly: " << minValid.toString() << "(" << minValid.toBSON()
<< ")";
TimestampedBSONObj update;
- update.obj = BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << minValid.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << minValid.getTerm()));
+ update.obj =
+ BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
+ << minValid.getTimestamp() << MinValidDocument::kMinValidTermFieldName
+ << minValid.getTerm()));
// This method is only used with storage engines that do not support recover to stable
// timestamp. As a result, their timestamps do not matter.
@@ -346,8 +343,8 @@ Status ReplicationConsistencyMarkersImpl::createInternalCollections(OperationCon
auto status = _storageInterface->createCollection(opCtx, nss, CollectionOptions());
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
- str::stream() << "Failed to create collection. Ns: " << nss.ns() << " Error: "
- << status.toString()};
+ str::stream() << "Failed to create collection. Ns: " << nss.ns()
+ << " Error: " << status.toString()};
}
}
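
Each rewrapped update document in this file follows the same nesting rule: the modifier ($max, $set, $unset) is the outer field, and its value is a BSON object holding the affected fields. A freestanding sketch with shortened, illustrative field names:

    // {$set: {ts: <timestamp>, t: <term>}}, the same shape as the minValid update.
    BSONObj update = BSON("$set" << BSON("ts" << Timestamp(42, 1) << "t" << 3LL));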
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index b9eee2a78a9..8b767924eac 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -774,12 +774,12 @@ public:
virtual std::vector<MemberData> getMemberData() const = 0;
/*
- * Handles an incoming replSetRequestVotes command.
- *
- * Populates the given 'response' object with the result of the request. If there is a failure
- * processing the vote request, returns an error status. If an error is returned, the value of
- * the populated 'response' object is invalid.
- */
+ * Handles an incoming replSetRequestVotes command.
+ *
+ * Populates the given 'response' object with the result of the request. If there is a failure
+ * processing the vote request, returns an error status. If an error is returned, the value of
+ * the populated 'response' object is invalid.
+ */
virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 0df40b457c2..1052ccec515 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -171,13 +171,13 @@ auto makeTaskExecutor(ServiceContext* service, const std::string& poolName) {
* down.
*/
void scheduleWork(executor::TaskExecutor* executor, executor::TaskExecutor::CallbackFn work) {
- auto cbh = executor->scheduleWork([work = std::move(work)](
- const executor::TaskExecutor::CallbackArgs& args) {
- if (args.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- work(args);
- });
+ auto cbh = executor->scheduleWork(
+ [work = std::move(work)](const executor::TaskExecutor::CallbackArgs& args) {
+ if (args.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ work(args);
+ });
if (cbh == ErrorCodes::ShutdownInProgress) {
return;
}
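
The lambda body is unchanged here; only the wrapping moved. The underlying idiom, moving the callable into the capture so the scheduled task owns it and treating cancellation as a benign no-op, in a self-contained sketch with hypothetical stand-in types:

    #include <functional>
    #include <utility>

    struct CallbackArgs {  // Stand-in for executor::TaskExecutor::CallbackArgs.
        bool canceled = false;
    };

    void scheduleWork(std::function<void(const CallbackArgs&)> work) {
        // Move 'work' into the capture so the task owns its callable.
        auto task = [work = std::move(work)](const CallbackArgs& args) {
            if (args.canceled) {
                return;  // Canceled during shutdown; nothing to do.
            }
            work(args);
        };
        task(CallbackArgs{});  // The real executor would invoke this asynchronously.
    }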
@@ -552,9 +552,7 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection(
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
str::stream() << "Failed to create local last vote collection. Ns: "
- << lastVoteCollectionName
- << " Error: "
- << status.toString()};
+ << lastVoteCollectionName << " Error: " << status.toString()};
}
// Make sure there's always a last vote document.
@@ -682,9 +680,7 @@ StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastO
return StatusWith<OpTimeAndWallTime>(
ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << NamespaceString::kRsOplogNamespace.ns()
- << " missing \""
- << tsFieldName
- << "\" field");
+ << " missing \"" << tsFieldName << "\" field");
}
if (tsElement.type() != bsonTimestamp) {
return StatusWith<OpTimeAndWallTime>(
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index af36200521b..41abb8bc4e6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -134,11 +134,11 @@ private:
void _shardingOnTransitionToPrimaryHook(OperationContext* opCtx);
/**
- * Drops all temporary collections on all databases except "local".
- *
- * The implementation may assume that the caller has acquired the global exclusive lock
- * for "opCtx".
- */
+ * Drops all temporary collections on all databases except "local".
+ *
+ * The implementation may assume that the caller has acquired the global exclusive lock
+ * for "opCtx".
+ */
void _dropAllTempCollections(OperationContext* opCtx);
ServiceContext* _service;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index db61adde61d..2509aa1d29e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -927,10 +927,9 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
- str::stream() << "Timed out waiting for state to become "
- << expectedState.toString()
- << ". Current state is "
- << _memberState.toString());
+ str::stream()
+ << "Timed out waiting for state to become " << expectedState.toString()
+ << ". Current state is " << _memberState.toString());
}
return Status::OK();
}
@@ -1642,8 +1641,9 @@ bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
"'committed' optime "
<< opTime
<< ". There are still drop pending collections (earliest drop optime: "
- << *dropOpTime << ") that have to be removed from storage before we can "
- "satisfy the write concern "
+ << *dropOpTime
+ << ") that have to be removed from storage before we can "
+ "satisfy the write concern "
<< writeConcern.toBSON();
return false;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8bb0ca7488c..73fa6dbae41 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -546,7 +546,7 @@ private:
* Loops continuously to kill all conflicting operations. And, aborts all stashed (inactive)
* transactions.
* Terminates once killSignaled is set true.
- */
+ */
void _killOpThreadFn();
/*
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index b0d3d8c3556..0402d4a2aed 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -59,11 +59,7 @@ using ApplierState = ReplicationCoordinator::ApplierState;
TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -90,18 +86,14 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -112,24 +104,17 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
}
TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(BSON("_id"
- << "mySet"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes"
- << 0
- << "hidden"
- << true
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes" << 0 << "hidden" << true << "priority" << 0))
+ << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -184,15 +169,12 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
TEST_F(ReplCoordTest, StartElectionDoesNotStartAnElectionWhenNodeIsRecovering) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_RECOVERING));
@@ -212,13 +194,10 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(10, 1), 0), Date_t() + Seconds(10));
@@ -246,17 +225,14 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -279,9 +255,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -296,8 +270,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
<< "node6:12345")
<< BSON("_id" << 7 << "host"
<< "node7:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -321,17 +294,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -363,9 +333,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 0 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
voteRequests++;
} else {
net->blackHole(noi);
@@ -382,17 +352,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -426,9 +393,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
voteRequests++;
} else {
@@ -449,9 +414,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
OperationContextNoop opCtx;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -462,8 +425,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -481,15 +443,12 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1))
+ << "protocolVersion" << 1))
.transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
@@ -576,17 +535,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -610,9 +566,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 1 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
}
net->runReadyNetworkOperations();
}
@@ -627,17 +583,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
TEST_F(ReplCoordTest, TransitionToRollbackFailsWhenElectionInProgress) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -666,17 +619,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -702,9 +652,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
}
net->runReadyNetworkOperations();
@@ -721,17 +669,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -762,17 +707,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -799,10 +741,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << request.cmdObj["term"].Long() << "voteGranted" << true
- << "reason"
- << "")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true << "reason"
+ << "")));
}
net->runReadyNetworkOperations();
}
@@ -965,18 +906,14 @@ private:
TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDelaySet) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("catchUpTimeoutMillis" << 0 << "catchUpTakeoverDelay"
<< 10000));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -1007,17 +944,14 @@ TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDe
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1054,21 +988,16 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary)
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1106,19 +1035,15 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
startCapturingLogMessages();
@@ -1162,17 +1087,14 @@ TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1219,21 +1141,16 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
// In order for node 1 to first schedule a catchup takeover, then a priority takeover
// once the first gets canceled, it must have a higher priority than the current primary
// (node 2). But, it must not have the highest priority in the replica set. Otherwise,
@@ -1285,17 +1202,14 @@ TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1359,17 +1273,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1422,17 +1333,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
TEST_F(TakeoverTest, SuccessfulCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1489,9 +1397,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -1502,8 +1408,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1565,12 +1470,11 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
net->blackHole(noi);
} else {
bool voteGranted = request.target != primaryHostAndPort;
- net->scheduleResponse(
- noi,
- until,
- makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted" << voteGranted
- << "reason"
- << "")));
+ net->scheduleResponse(noi,
+ until,
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << voteGranted << "reason"
+ << "")));
voteRequests++;
}
net->runReadyNetworkOperations();
@@ -1598,17 +1502,14 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1659,21 +1560,16 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1743,19 +1639,15 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1790,19 +1682,15 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent
TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1845,19 +1733,15 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1924,19 +1808,15 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -2004,19 +1884,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2044,11 +1919,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2069,19 +1940,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2094,11 +1960,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2160,14 +2022,13 @@ protected:
net->getNextReadyRequest(), net->now(), makeHeartbeatResponse(opTime));
} else if (request.cmdObj.firstElement().fieldNameStringData() ==
"replSetRequestVotes") {
- net->scheduleResponse(net->getNextReadyRequest(),
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ net->getNextReadyRequest(),
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
// Stop the loop and let the caller handle unexpected requests.
net->exitNetwork();
@@ -2181,18 +2042,14 @@ protected:
ReplSetConfig setUp3NodeReplSetAndRunForElection(OpTime opTime, long long timeout = 5000) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 1 << "catchUpTimeoutMillis"
<< timeout));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
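
Most of the churn in this test file is the same re-flow applied to the BSON(...) macro: the builder consumes alternating key and value tokens through chained <<, so grouping "version" << 1 << "members" onto one line yields the identical document. A standalone sketch of that key/value alternation (DocSketch is hypothetical, not mongo's BSONObjBuilder, and assumes keys and values strictly alternate):

    #include <iostream>
    #include <string>

    // Hypothetical model of BSON(...) chaining: tokens alternate key, value,
    // key, value; the rendered document depends only on token order, never
    // on how the chain is wrapped across source lines.
    class DocSketch {
    public:
        DocSketch& operator<<(const std::string& tok) {
            append('"' + tok + '"');
            return *this;
        }
        DocSketch& operator<<(long long v) {
            append(std::to_string(v));
            return *this;
        }
        std::string str() const {
            return "{ " + _body + " }";
        }

    private:
        void append(const std::string& tok) {
            if (_expectKey) {
                _body += _body.empty() ? tok : ", " + tok;
                _body += ": ";
            } else {
                _body += tok;
            }
            _expectKey = !_expectKey;
        }
        std::string _body;
        bool _expectKey = true;
    };

    int main() {
        DocSketch cfg;
        cfg << "_id" << "mySet"
            << "version" << 1 << "protocolVersion" << 1;
        std::cout << cfg.str() << '\n';
        return 0;
    }

Running it prints { "_id": "mySet", "version": 1, "protocolVersion": 1 }, the same document either wrapping style would produce.
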
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 62834fe3d0c..5af7e96e979 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -174,11 +174,11 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (replMetadata.isOK() && _rsConfig.isInitialized() && _rsConfig.hasReplicaSetId() &&
replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig.getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
- responseStatus = Status(ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << "replica set IDs do not match, ours: "
- << _rsConfig.getReplicaSetId()
- << "; remote node's: "
- << replMetadata.getValue().getReplicaSetId());
+ responseStatus =
+ Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << "replica set IDs do not match, ours: " << _rsConfig.getReplicaSetId()
+ << "; remote node's: " << replMetadata.getValue().getReplicaSetId());
// Ignore metadata.
replMetadata = responseStatus;
}
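
The same reasoning covers the Status construction in this hunk: the error code and the stream-built reason string are untouched, only the wrapping moves. A hypothetical mini Status (StatusSketch below, not mongo's mongo::Status, with stand-in replica set IDs) illustrating the shape of the call site:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical error-code enum and Status shape, just to mirror the
    // reformatted call site: a code paired with a stream-built message.
    enum class Code { OK, InvalidReplicaSetConfig };

    struct StatusSketch {
        Code code;
        std::string reason;
    };

    int main() {
        std::string ours = "id-A";    // stand-in for _rsConfig.getReplicaSetId()
        std::string theirs = "id-B";  // stand-in for the remote node's ID
        std::ostringstream ss;
        ss << "replica set IDs do not match, ours: " << ours
           << "; remote node's: " << theirs;
        StatusSketch responseStatus{Code::InvalidReplicaSetConfig, ss.str()};
        std::cout << responseStatus.reason << '\n';
        return 0;
    }
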
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 53eedf88523..ec9fb647668 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -88,17 +88,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -158,21 +155,18 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test,
ArbiterJoinsExistingReplSetWhenReceivingAConfigContainingTheArbiterViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion"
- << 1));
+ ReplSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -236,17 +230,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
@@ -321,9 +312,7 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -336,12 +325,12 @@ TEST_F(ReplCoordHBV1Test,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code"
- << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(
+ noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code" << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
@@ -362,15 +351,11 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
HostAndPort host2("node2:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host" << host2.toString()))
- << "settings"
- << BSON("replicaSetId" << OID::gen())
- << "protocolVersion"
+ << "settings" << BSON("replicaSetId" << OID::gen()) << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -442,10 +427,9 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
ASSERT_EQ(MemberState(MemberState::RS_DOWN).toString(),
MemberState(member["state"].numberInt()).toString());
ASSERT_EQ(member["lastHeartbeatMessage"].String(),
- std::string(str::stream() << "replica set IDs do not match, ours: "
- << rsConfig.getReplicaSetId()
- << "; remote node's: "
- << unexpectedId));
+ std::string(str::stream()
+ << "replica set IDs do not match, ours: " << rsConfig.getReplicaSetId()
+ << "; remote node's: " << unexpectedId));
}
} // namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 3fad34dfe2a..738ff86ef87 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -73,9 +73,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -99,9 +97,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive uninitializable config
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -117,21 +113,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "invalidlyNamedField"
- << 3
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "invalidlyNamedField"
+ << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "arbiterOnly"
- << true)));
+ << "arbiterOnly" << true)));
const auto opCtx = makeOperationContext();
// ErrorCodes::BadValue should be propagated from ReplSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
@@ -143,9 +132,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -161,11 +148,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "notMySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -181,15 +164,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -201,17 +181,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()));
+ << "settings" << BSON("replicaSetId" << OID::gen()));
const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -224,9 +199,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, validate fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -242,11 +215,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -266,9 +235,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord,
replCoord->processReplSetInitiate(opCtx,
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -285,17 +252,12 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord,
// Replica set id will be copied from existing configuration.
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "priority"
- << 3)));
+ << "priority" << 3)));
*status = replCoord->processReplSetReconfig(opCtx, args, &garbage);
}
@@ -305,9 +267,7 @@ TEST_F(ReplCoordTest,
// containing a higher config version
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -348,9 +308,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
// start up, become primary, saving the config fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -377,9 +335,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, reconfig, then before that reconfig concludes, reconfig again
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -404,11 +360,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -443,11 +395,7 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -464,15 +412,12 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
// start up, become primary, reconfig successfully
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -512,9 +457,7 @@ TEST_F(
// from reconfig
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -538,11 +481,7 @@ TEST_F(
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -581,9 +520,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
// start up, become primary, reconfig, while reconfigging receive reconfig via heartbeat
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -614,9 +551,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "members"
+ << "version" << 4 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -652,9 +587,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -670,11 +603,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index a7ff18688e8..ee40d510288 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -120,15 +120,12 @@ void killOperation(OperationContext* opCtx) {
TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
auto replCoord = getReplCoord();
@@ -162,9 +159,7 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -175,13 +170,10 @@ TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig)
TEST_F(ReplCoordTest, NodeEntersArbiterStateWhenStartingUpWithValidLocalConfigWhereItIsAnArbiter) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"))),
HostAndPort("node1", 12345));
@@ -193,9 +185,7 @@ TEST_F(ReplCoordTest, NodeEntersRemovedStateWhenStartingUpWithALocalConfigWhichL
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -212,9 +202,7 @@ TEST_F(ReplCoordTest,
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "notMySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -255,9 +243,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -271,9 +257,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result2));
@@ -296,9 +280,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
<< "arbiterOnly"
@@ -327,9 +309,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -347,9 +327,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node4"))),
&result));
@@ -363,9 +341,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 1 << "host"
@@ -460,9 +436,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "wrongSet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -491,8 +465,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithoutAn_
BSONObjBuilder result1;
auto status = getReplCoord()->processReplSetInitiate(
opCtx.get(),
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node1:12345"))),
+ BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
&result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "Missing expected field \"_id\"");
@@ -511,9 +486,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -534,9 +507,7 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -557,9 +528,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -611,13 +580,10 @@ TEST_F(
TEST_F(ReplCoordTest, NodeReturnsOkWhenCheckReplEnabledForCommandAfterReceivingAConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
// check status OK and result is empty
@@ -647,21 +613,16 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstASta
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASecondaryNode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -682,21 +643,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWithWTermOne) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
OpTimeWithTermOne time(100, 1);
@@ -724,25 +680,19 @@ TEST_F(ReplCoordTest,
NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWriteDurable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -804,25 +754,19 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWrite) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -881,9 +825,7 @@ TEST_F(ReplCoordTest,
NodeReturnsUnknownReplWriteConcernWhenAwaitReplicationReceivesAnInvalidWriteConcernMode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0")
<< BSON("_id" << 1 << "host"
@@ -920,9 +862,7 @@ TEST_F(
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0"
<< "tags"
@@ -1102,21 +1042,16 @@ private:
TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1166,21 +1101,16 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfie
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedWhenAWriteConcernTimesOutBeforeBeingSatisified) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1217,21 +1147,16 @@ TEST_F(ReplCoordTest,
NodeReturnsShutDownInProgressWhenANodeShutsDownPriorToSatisfyingAWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1267,21 +1192,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
// if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1315,9 +1235,7 @@ TEST_F(ReplCoordTest,
// Tests that a thread blocked in awaitReplication can be killed by a killOp operation
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1")
<< BSON("_id" << 1 << "host"
@@ -1435,9 +1353,7 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1462,9 +1378,7 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1498,33 +1412,27 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberOneAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberOneDurableWallTime)
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberTwoAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberTwoDurableWallTime)))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(
+ UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName << memberOneAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName << memberOneDurableWallTime)
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << memberTwoAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << memberTwoDurableWallTime)))));
ASSERT_OK(repl->processReplSetUpdatePosition(updatePositionArgs, &configVersion));
@@ -1546,17 +1454,14 @@ TEST_F(ReplCoordTest, ElectionIdTracksTermInPV1) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1610,17 +1515,14 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1656,17 +1558,14 @@ TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreTha
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1712,17 +1611,14 @@ TEST_F(ReplCoordTest, DrainCompletionMidStepDown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1784,12 +1680,9 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1799,9 +1692,7 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1886,17 +1777,12 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "test3:1234"))),
HostAndPort("test1", 1234));
@@ -1936,12 +1822,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupFirstSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1951,9 +1834,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1975,12 +1856,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupOtherSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1990,9 +1868,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -2121,9 +1997,7 @@ private:
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2220,14 +2094,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetStepDownTimeoutAndElectionTimeoutExpiresA
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2256,14 +2126,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetUnfreeze) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 10000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2305,9 +2171,7 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2337,9 +2201,7 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2693,13 +2555,10 @@ TEST_F(ReplCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
}
@@ -2707,9 +2566,7 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2786,11 +2643,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2813,11 +2666,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2849,11 +2698,7 @@ TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintena
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2883,11 +2728,7 @@ TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackS
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2929,11 +2770,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2968,11 +2805,7 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -3040,9 +2873,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3085,9 +2916,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3123,19 +2952,14 @@ TEST_F(ReplCoordTest, NodeReturnsNoNodesWhenGetOtherNodesInReplSetIsRunBeforeHav
TEST_F(ReplCoordTest, NodeReturnsListOfNodesOtherThanItselfInResponseToGetOtherNodesInReplSet) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h1")
<< BSON("_id" << 1 << "host"
<< "h2")
<< BSON("_id" << 2 << "host"
<< "h3"
- << "priority"
- << 0
- << "hidden"
- << true))),
+ << "priority" << 0 << "hidden" << true))),
HostAndPort("h1"));
std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
@@ -3173,9 +2997,7 @@ TEST_F(ReplCoordTest, IsMaster) {
BSON(
"_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
<< BSON("_id" << 1 << "host" << h2.toString())
<< BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
@@ -3238,9 +3060,7 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3270,9 +3090,7 @@ TEST_F(ReplCoordTest, IsMasterInShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3308,21 +3126,16 @@ TEST_F(ReplCoordTest, LogAMessageWhenShutDownBeforeReplicationStartUpFinished) {
TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3349,18 +3162,13 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 0
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 0
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3372,21 +3180,16 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3407,18 +3210,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 3
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 3 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3435,21 +3233,16 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3470,18 +3263,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 9
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 9
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3497,21 +3285,16 @@ TEST_F(ReplCoordTest,
ProcessUpdateWhenUpdatePositionContainsOnlyConfigVersionAndMemberIdsWithoutRIDs) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3535,32 +3318,26 @@ TEST_F(ReplCoordTest,
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
auto opCtx = makeOperationContext();
@@ -3581,15 +3358,10 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "node2:12345")
<< BSON("_id" << 2 << "host"
@@ -3600,21 +3372,16 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3681,11 +3448,7 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -3698,21 +3461,16 @@ TEST_F(
NodeReturnsUnsatisfiableWriteConcernWhenReconfiggingToAClusterThatCannotSatisfyTheWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 2), Date_t() + Seconds(100));
@@ -3759,29 +3517,22 @@ TEST_F(ReplCoordTest,
NodeReturnsOKFromAwaitReplicationWhenReconfiggingToASetWhereMajorityIsSmallerAndSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))),
+ << "_id" << 4))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3842,35 +3593,22 @@ TEST_F(ReplCoordTest,
// satisfied by voting data-bearing members.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 1), 1);
@@ -3910,35 +3648,22 @@ TEST_F(ReplCoordTest,
// Test that the commit level advances properly.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime zero(Timestamp(0, 0), 0);
@@ -4170,11 +3895,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4238,11 +3959,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorageDisableMajori
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4275,11 +3992,7 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4337,15 +4050,11 @@ TEST_F(StableOpTimeTest, ClearOpTimeCandidatesPastCommonPointAfterRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))
- << "protocolVersion"
- << 1),
+ << "_id" << 0))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
auto repl = getReplCoord();
@@ -4441,13 +4150,10 @@ TEST_F(StableOpTimeTest, OpTimeCandidatesAreNotAddedWhenStateIsNotConsistent) {
TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDuringShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4466,13 +4172,10 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4490,13 +4193,10 @@ TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupte
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4507,13 +4207,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimePriorToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -4529,13 +4226,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimeEqualToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -4576,13 +4270,10 @@ TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunAgainstAS
TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4602,13 +4293,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
const auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4625,13 +4313,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4647,13 +4332,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4669,13 +4351,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4697,13 +4376,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4727,13 +4403,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
TEST_F(ReplCoordTest, WaitUntilOpTimeforReadRejectsUnsupportedMajorityReadConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4759,21 +4432,16 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
// Ensure that we do not process ReplSetMetadata when ConfigVersions do not match.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4781,35 +4449,20 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 1
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 1
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
// higher configVersion
- StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 100
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(
+ BSON(rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 100
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
}
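The ReplSetMetadata literals reformatted in this test and the ones below all share one shape; a compact sketch with placeholder values, assuming the rpc metadata header is available (makeReplSetMetadataDoc is illustrative only, not part of this patch):

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"

// The metadata document these tests feed to replReadFromMetadata; field
// names are copied verbatim from the BSON literals above, values are
// placeholders. (Illustrative sketch, not part of the patch.)
mongo::BSONObj makeReplSetMetadataDoc(long long configVersion, long long term) {
    using namespace mongo;
    return BSON(rpc::kReplSetMetadataFieldName
                << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << term)
                        << "lastCommittedWall" << Date_t() + Seconds(100)
                        << "lastOpVisible" << BSON("ts" << Timestamp(10, 0) << "t" << term)
                        << "configVersion" << configVersion << "primaryIndex" << 2
                        << "term" << term << "syncSourceIndex" << 1));
}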
@@ -4819,23 +4472,17 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
// but not if the OpTime is older than the current LastCommittedOpTime.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4865,23 +4512,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// Ensure that currentPrimaryIndex is never altered by ReplSetMetadata.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -4892,17 +4533,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 2
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 2 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4912,17 +4545,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4932,17 +4557,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata3 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata3.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4954,19 +4571,14 @@ TEST_F(ReplCoordTest,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -4978,19 +4590,12 @@ TEST_F(ReplCoordTest,
// Higher term - should update term but not last committed optime.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5021,19 +4626,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatWhenLastApplied
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5103,19 +4703,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatInFCV42) {
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5182,19 +4777,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatInFCV42) {
TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastAppliedIgnoringTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5212,23 +4802,17 @@ TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastApp
TEST_F(ReplCoordTest, PrepareOplogQueryMetadata) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5276,21 +4860,14 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "arbiterOnly"
- << true)
+ << "_id" << 0 << "arbiterOnly" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -5303,19 +4880,12 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Higher term - should update term and lastCommittedOpTime since arbiters learn of the
// commit point via heartbeats.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 1) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5346,19 +4916,13 @@ TEST_F(ReplCoordTest,
ScheduleElectionToBeRunInElectionTimeoutFromNowWhenCancelAndRescheduleElectionTimeoutIsRun) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5397,19 +4961,13 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5428,23 +4986,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileUnelectable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "_id" << 0 << "priority" << 0 << "hidden" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5459,19 +5007,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileRemoved) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5497,15 +5039,10 @@ TEST_F(ReplCoordTest,
config
.initialize(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
+ << "protocolVersion" << 1 << "version" << 3 << "members"
<< BSON_ARRAY(BSON("host"
<< "node2:12345"
- << "_id"
- << 1))))
+ << "_id" << 1))))
.transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
@@ -5529,19 +5066,13 @@ TEST_F(ReplCoordTest,
RescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInSameTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5586,19 +5117,13 @@ TEST_F(ReplCoordTest,
DontRescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInDiffertTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5640,19 +5165,13 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseWithoutState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5692,9 +5211,7 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5728,9 +5245,7 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5760,9 +5275,7 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5785,13 +5298,10 @@ TEST_F(ReplCoordTest,
NodeChangesMyLastOpTimeWhenAndOnlyWhensetMyLastDurableOpTimeReceivesANewerOpTime4DurableSE) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5815,13 +5325,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5841,13 +5348,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5867,13 +5371,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5893,13 +5394,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5918,18 +5416,14 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -5992,18 +5486,14 @@ TEST_F(ReplCoordTest, UpdatePositionCmdHasMetadata) {
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -6033,32 +5523,23 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))
- << "protocolVersion"
- << 1
- << "settings"
+ << "_id" << 4))
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6070,57 +5551,42 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args;
ASSERT_OK(updatePositionArgsInitialize(
args,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 3
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 4
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
// Become PRIMARY.
@@ -6130,33 +5596,26 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args1;
ASSERT_OK(updatePositionArgsInitialize(
args1,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())))),
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())))),
/*requireWallTime*/ true));
const Date_t startDate = getNet()->now();
getNet()->enterNetwork();
@@ -6198,20 +5657,16 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
ASSERT_OK(updatePositionArgsInitialize(
args2,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0));
hbArgs.setSetName("mySet");
@@ -6241,9 +5696,7 @@ TEST_F(ReplCoordTest, WaitForMemberState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6278,9 +5731,7 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6319,13 +5770,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6342,13 +5790,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << true),
+ << "writeConcernMajorityJournalDefault" << true),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6363,13 +5808,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfSync
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6391,13 +5833,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6414,21 +5853,16 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6441,15 +5875,9 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6468,21 +5896,16 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6495,15 +5918,9 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << true
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << true << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6524,17 +5941,13 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6549,18 +5962,12 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
// Simulate a failure to write the 'last vote' document. The specific error code isn't
@@ -6585,17 +5992,13 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6610,18 +6013,12 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
getReplCoord()->enterTerminalShutdown();
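
The hunks above are all the same mechanical change: the updated clang-format configuration packs consecutive << operands of the BSON() and BSON_ARRAY() builder macros onto shared lines instead of breaking after every key and value. A minimal standalone sketch of the macro usage follows; the header paths are an assumption from memory of the MongoDB source tree, not something this diff establishes.

// Sketch only: the BSON()/BSON_ARRAY() streaming style this commit reformats.
// Header locations are assumptions.
#include "mongo/bson/bsonmisc.h"        // BSON, BSON_ARRAY (assumed)
#include "mongo/bson/bsonobjbuilder.h"  // BSONObjBuilder

#include <iostream>

int main() {
    using namespace mongo;

    // New style: keys and scalar values share lines; string literals still
    // get their own line because the style breaks before them.
    BSONObj config = BSON("_id"
                          << "mySet"
                          << "version" << 1 << "members"
                          << BSON_ARRAY(BSON("_id" << 0 << "host"
                                             << "test1:1234")));

    std::cout << config.jsonString() << std::endl;
    return 0;
}
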
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 82317b139a2..068a769e735 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -276,14 +276,13 @@ void ReplCoordTest::simulateSuccessfulDryRun(
if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
ASSERT_TRUE(request.cmdObj.getBoolField("dryRun"));
onDryRunRequest(request);
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
voteRequests++;
} else if (consumeHeartbeatV1(noi)) {
// The heartbeat has been consumed.
@@ -345,14 +344,13 @@ void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t e
hbResp.setConfigVersion(rsConfig.getConfigVersion());
net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index e3f26e73513..40c1499ef67 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -66,10 +66,10 @@ namespace mongo {
MONGO_FAIL_POINT_DEFINE(waitInIsMaster);
-using std::unique_ptr;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
namespace repl {
namespace {
@@ -328,8 +328,7 @@ public:
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unrecognized field of 'internalClient': '"
- << fieldName
- << "'");
+ << fieldName << "'");
}
}
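
The reflowed uasserted() call above builds its message with str::stream(), mongo's small helper that accumulates << operands and converts implicitly to std::string; most of the error-message hunks in the files below are the same reflow. A sketch, with the caveat that the include path is an assumption (older trees spell it mongo/util/mongoutils/str.h):

// Sketch: str::stream() builds a std::string from streamed operands.
// The include path is an assumption about this vintage of the tree.
#include "mongo/util/str.h"

#include <string>

std::string unknownFieldMessage(const std::string& fieldName) {
    // The implicit conversion to std::string happens at the return statement.
    return mongo::str::stream()
        << "Unrecognized field of 'internalClient': '" << fieldName << "'";
}
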
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index fba6d41fd6d..f4dd3e104c5 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -413,8 +413,7 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
invariant(applyThroughOpTime.getTimestamp() == topOfOplog,
str::stream() << "Did not apply to top of oplog. Applied through: "
<< applyThroughOpTime.toString()
- << ". Top of oplog: "
- << topOfOplog.toString());
+ << ". Top of oplog: " << topOfOplog.toString());
oplogBuffer.shutdown(opCtx);
// We may crash before setting appliedThrough. If we have a stable checkpoint, we will recover
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index 0d12cd58fa8..c97746080e5 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -1051,9 +1051,7 @@ TEST_F(ReplicationRecoveryTest, CommitTransactionOplogEntryCorrectlyUpdatesConfi
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
@@ -1128,9 +1126,7 @@ TEST_F(ReplicationRecoveryTest,
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 6213eb4fe26..f056fee9332 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -379,8 +379,7 @@ TEST_F(ReporterTestNoTriggerAtSetUp,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << 100));
+ << "configVersion" << 100));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -399,8 +398,7 @@ TEST_F(ReporterTest, InvalidReplicaSetResponseWithSameConfigVersionOnSyncTargetS
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "invalid config"
- << "configVersion"
- << posUpdater->getConfigVersion()));
+ << "configVersion" << posUpdater->getConfigVersion()));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -416,8 +414,7 @@ TEST_F(ReporterTest,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << posUpdater->getConfigVersion() + 1));
+ << "configVersion" << posUpdater->getConfigVersion() + 1));
ASSERT_TRUE(reporter->isActive());
}
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index 09047074164..1e5b102a595 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -123,14 +123,11 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollBackLocalOperations
auto result = _localOplogIterator->next();
if (!result.isOK()) {
return Status(ErrorCodes::NoMatchingDocument,
- str::stream() << "reached beginning of local oplog: {"
- << "scanned: "
- << _scanned
- << ", theirTime: "
- << getTimestamp(operation).toString()
- << ", ourTime: "
- << getTimestamp(_localOplogValue).toString()
- << "}");
+ str::stream()
+ << "reached beginning of local oplog: {"
+ << "scanned: " << _scanned
+ << ", theirTime: " << getTimestamp(operation).toString()
+ << ", ourTime: " << getTimestamp(_localOplogValue).toString() << "}");
}
opAfterCurrentEntry = _localOplogValue.first;
_localOplogValue = result.getValue();
@@ -200,11 +197,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperat
}
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "reached beginning of remote oplog: {"
- << "them: "
- << remoteOplog.toString()
- << ", theirTime: "
- << theirTime.toString()
- << "}");
+ << "them: " << remoteOplog.toString()
+ << ", theirTime: " << theirTime.toString() << "}");
}
} // namespace repl
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 1f8a933b67c..67fff417d0a 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -50,26 +50,18 @@ BSONObj makeOp(long long seconds, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
BSONObj makeOpWithWallClockTime(long count, long wallClockMillis, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(count, count) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid
- << "wall"
- << Date_t::fromMillisSinceEpoch(wallClockMillis));
+ << "ui" << uuid << "wall" << Date_t::fromMillisSinceEpoch(wallClockMillis));
};
int recordId = 0;
@@ -150,7 +142,8 @@ TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -175,7 +168,10 @@ TEST(RollBackLocalOperationsTest, EndOfLocalOplog) {
TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(5), makeOpAndRecordId(4), makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(5),
+ makeOpAndRecordId(4),
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
@@ -209,7 +205,8 @@ TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
TEST(RollBackLocalOperationsTest, SameTimestampDifferentTermsRollbackNoSuchKey) {
auto commonOperation = makeOpAndRecordId(1, 1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2, 3), commonOperation,
+ makeOpAndRecordId(2, 3),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -242,7 +239,9 @@ TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
auto commonOperation = makeOpWithWallClockTimeAndRecordId(1, 1 * 5000);
auto firstOpAfterCommonPoint = makeOpWithWallClockTimeAndRecordId(2, 2 * 60 * 60 * 24 * 1000);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(3), firstOpAfterCommonPoint, commonOperation,
+ makeOpAndRecordId(3),
+ firstOpAfterCommonPoint,
+ commonOperation,
});
auto i = localOperations.cbegin();
auto result = syncRollBackLocalOperations(OplogInterfaceMock(localOperations),
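
The OplogInterfaceMock::Operations hunks above show a second formatting rule at work: when a braced initializer list ends with a trailing comma, clang-format lays the list out one element per line instead of packing the elements together. A tiny illustration in plain standard C++, nothing mongo-specific:

// With a trailing comma, clang-format keeps one element per line; without
// one, the elements may be bin-packed onto a single line.
#include <vector>

std::vector<int> packed{1, 2, 3};
std::vector<int> vertical{
    1,
    2,
    3,
};
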
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index b6aca140721..73c484ec452 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -100,8 +100,9 @@ boost::optional<long long> _parseDroppedCollectionCount(const OplogEntry& oplogE
auto obj2 = oplogEntry.getObject2();
if (!obj2) {
- warning() << "Unable to get collection count from " << desc << " without the o2 "
- "field. oplog op: "
+ warning() << "Unable to get collection count from " << desc
+ << " without the o2 "
+ "field. oplog op: "
<< redact(oplogEntry.toBSON());
return boost::none;
}
@@ -324,10 +325,10 @@ Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
auto status =
_replicationCoordinator->setFollowerModeStrict(opCtx, MemberState::RS_ROLLBACK);
if (!status.isOK()) {
- status.addContext(str::stream() << "Cannot transition from "
- << _replicationCoordinator->getMemberState().toString()
- << " to "
- << MemberState(MemberState::RS_ROLLBACK).toString());
+ status.addContext(str::stream()
+ << "Cannot transition from "
+ << _replicationCoordinator->getMemberState().toString() << " to "
+ << MemberState(MemberState::RS_ROLLBACK).toString());
log() << status;
return status;
}
@@ -416,9 +417,9 @@ StatusWith<std::set<NamespaceString>> RollbackImpl::_namespacesForOp(const Oplog
// These commands do not need to be supported by rollback. 'convertToCapped' should
// always be converted to lower level DDL operations, and 'emptycapped' is a
// testing-only command.
- std::string message = str::stream() << "Encountered unsupported command type '"
- << firstElem.fieldName()
- << "' during rollback.";
+ std::string message = str::stream()
+ << "Encountered unsupported command type '" << firstElem.fieldName()
+ << "' during rollback.";
return Status(ErrorCodes::UnrecoverableRollbackError, message);
}
case OplogEntry::CommandType::kCreate:
@@ -594,8 +595,7 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) {
auto collToScan = autoCollToScan.getCollection();
invariant(coll == collToScan,
str::stream() << "Catalog returned invalid collection: " << nss.ns() << " ("
- << uuid.toString()
- << ")");
+ << uuid.toString() << ")");
auto exec = collToScan->makePlanExecutor(
opCtx, PlanExecutor::INTERRUPT_ONLY, Collection::ScanDirection::kForward);
long long countFromScan = 0;
@@ -816,8 +816,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
const auto uuid = oplogEntry.getUuid().get();
invariant(_countDiffs.find(uuid) == _countDiffs.end(),
str::stream() << "Unexpected existing count diff for " << uuid.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -843,10 +842,9 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
<< "Oplog entry to roll back is unexpectedly missing dropTarget UUID: "
<< redact(oplogEntry.toBSON()));
invariant(_countDiffs.find(dropTargetUUID) == _countDiffs.end(),
- str::stream() << "Unexpected existing count diff for "
- << dropTargetUUID.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ str::stream()
+ << "Unexpected existing count diff for " << dropTargetUUID.toString()
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -1012,9 +1010,7 @@ Status RollbackImpl::_checkAgainstTimeLimit(
if (diff > timeLimit) {
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << "not willing to roll back more than " << timeLimit
- << " seconds of data. Have: "
- << diff
- << " seconds.");
+ << " seconds of data. Have: " << diff << " seconds.");
}
} else {
@@ -1044,8 +1040,7 @@ Timestamp RollbackImpl::_findTruncateTimestamp(
invariant(commonPointTime.getStatus());
invariant(commonPointTime.getValue() == commonPointOpTime,
str::stream() << "Common point: " << commonPointOpTime.toString()
- << ", record found: "
- << commonPointTime.getValue().toString());
+ << ", record found: " << commonPointTime.getValue().toString());
// Get the next document, which will be the first document to truncate.
auto truncatePointRecord = oplogCursor->next();
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 5343879c633..660231c4dbc 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -284,7 +284,7 @@ public:
virtual const std::vector<BSONObj>& docsDeletedForNamespace_forTest(UUID uuid) const& {
MONGO_UNREACHABLE;
}
- void docsDeletedForNamespace_forTest(UUID)&& = delete;
+ void docsDeletedForNamespace_forTest(UUID) && = delete;
protected:
/**
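
The one-character hunk in rollback_impl.h only adjusts whitespace around a ref-qualifier, but the construct is worth spelling out: the class keeps an lvalue-qualified accessor that returns a reference and deletes the rvalue-qualified overload, so the accessor cannot be called on a temporary whose storage would immediately vanish. A self-contained sketch of the idiom; the names are illustrative, not taken from the diff:

// Sketch of the ref-qualifier idiom behind
// `void docsDeletedForNamespace_forTest(UUID) && = delete;`.
#include <vector>

class DocStore {
public:
    // Callable only on lvalues; the returned reference outlives the call.
    const std::vector<int>& docs() const& {
        return _docs;
    }
    // Deleted for rvalues: DocStore{}.docs() would hand back a reference
    // into a temporary, so the compiler rejects the call instead.
    void docs() && = delete;

private:
    std::vector<int> _docs;
};

int main() {
    DocStore store;
    const auto& d = store.docs();  // OK: called on an lvalue
    (void)d;
    // const auto& bad = DocStore{}.docs();  // error: rvalue overload deleted
    return 0;
}
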
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index 927219c46db..98ac60952ca 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -69,37 +69,21 @@ std::string kGenericUUIDStr = "b4c66a44-c1ca-4d86-8d25-12e82fa2de5b";
BSONObj makeInsertOplogEntry(long long time, BSONObj obj, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "i"
- << "o"
- << obj
- << "ns"
- << ns
- << "ui"
- << uuid);
+ << "o" << obj << "ns" << ns << "ui" << uuid);
}
BSONObj makeUpdateOplogEntry(
long long time, BSONObj query, BSONObj update, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "u"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o2"
- << query
- << "o"
+ << "ns" << ns << "ui" << uuid << "o2" << query << "o"
<< BSON("$set" << update));
}
BSONObj makeDeleteOplogEntry(long long time, BSONObj id, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "d"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o"
- << id);
+ << "ns" << ns << "ui" << uuid << "o" << id);
}
class RollbackImplForTest final : public RollbackImpl {
@@ -380,12 +364,7 @@ BSONObj makeOp(OpTime time) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << time.getTimestamp() << "t" << time.getTerm() << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
- << nss.ns()
- << "ui"
- << kGenericUUID);
+ << "o" << BSONObj() << "ns" << nss.ns() << "ui" << kGenericUUID);
}
BSONObj makeOp(int count) {
@@ -400,13 +379,9 @@ auto makeOpWithWallClockTime(long count, long wallClockMillis) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << Timestamp(count, count) << "t" << (long long)count << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "top"
- << "ui"
- << kGenericUUID
- << "wall"
+ << "ui" << kGenericUUID << "wall"
<< Date_t::fromMillisSinceEpoch(wallClockMillis));
};
@@ -955,14 +930,10 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteRollbackFilesIfNoInsertsOrUpdatesAf
const auto uuid = UUID::gen();
const auto nss = NamespaceString("db.coll");
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1183,12 +1154,7 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenInsertsAndDropOfCollectio
const auto oplogEntry =
BSON("ts" << dropOpTime.getTimestamp() << "t" << dropOpTime.getTerm() << "op"
<< "c"
- << "o"
- << BSON("drop" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ << "o" << BSON("drop" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1213,14 +1179,10 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCreateCollAndInsertsAreRo
const auto nss = NamespaceString("db.people");
const auto uuid = UUID::gen();
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
// Insert documents into the collection.
@@ -1584,14 +1546,14 @@ public:
void assertRollbackInfoContainsObjectForUUID(UUID uuid, BSONObj bson) {
const auto& uuidToIdMap = _rbInfo.rollbackDeletedIdsMap;
auto search = uuidToIdMap.find(uuid);
- ASSERT(search != uuidToIdMap.end()) << "map is unexpectedly missing an entry for uuid "
- << uuid.toString() << " containing object "
- << bson.jsonString();
+ ASSERT(search != uuidToIdMap.end())
+ << "map is unexpectedly missing an entry for uuid " << uuid.toString()
+ << " containing object " << bson.jsonString();
const auto& idObjSet = search->second;
const auto iter = idObjSet.find(bson);
- ASSERT(iter != idObjSet.end()) << "_id object set is unexpectedly missing object "
- << bson.jsonString() << " in namespace with uuid "
- << uuid.toString();
+ ASSERT(iter != idObjSet.end())
+ << "_id object set is unexpectedly missing object " << bson.jsonString()
+ << " in namespace with uuid " << uuid.toString();
}
@@ -1675,12 +1637,12 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropColl
TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateIndexOplogEntry) {
auto nss = NamespaceString("test", "coll");
- auto indexObj = BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
- << static_cast<int>(IndexDescriptor::IndexVersion::kV2)
- << "key"
- << "x"
- << "name"
- << "x_1");
+ auto indexObj =
+ BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
+ << static_cast<int>(IndexDescriptor::IndexVersion::kV2) << "key"
+ << "x"
+ << "name"
+ << "x_1");
auto cmdOp =
makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString(), indexObj, 2);
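
The helpers rewritten above (makeInsertOplogEntry, makeUpdateOplogEntry, makeDeleteOplogEntry, makeOp) all assemble raw oplog documents, and their short field names follow the standard replica-set oplog schema: "ts" is the opTime timestamp, "t" the term, "op" the type ("i" insert, "u" update, "d" delete, "c" command, "n" no-op), "ns" the namespace, "ui" the collection UUID, "o" the operation document, and "o2" the update criteria. A hedged sketch of the insert case; include paths are assumptions:

// Sketch of the oplog-entry shape the test helpers construct. Field meanings
// are the standard oplog schema; include paths are assumptions.
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/timestamp.h"
#include "mongo/util/uuid.h"

namespace mongo {

BSONObj makeInsertEntry(long long time, BSONObj doc, StringData ns, UUID uuid) {
    return BSON("ts" << Timestamp(time, time)  // opTime: timestamp component
                     << "t" << time            // opTime: term component
                     << "op"
                     << "i"                    // operation type: insert
                     << "ns" << ns             // target namespace
                     << "ui" << uuid           // collection UUID
                     << "o" << doc);           // the inserted document
}

}  // namespace mongo
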
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 2170ee1663d..9af1717b4ab 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -105,9 +105,7 @@ StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfoByUUID(const std::strin
return StatusWith<BSONObj>(ErrorCodes::NoSuchKey,
str::stream()
<< "No collection info found for collection with uuid: "
- << uuid.toString()
- << " in db: "
- << db);
+ << uuid.toString() << " in db: " << db);
}
invariant(info.size() == 1U);
return info.front();
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 46baaac89be..b2afd664b94 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -295,12 +295,9 @@ void RollbackResyncsCollectionOptionsTest::resyncCollectionOptionsTest(
auto commonOpUuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
auto commonOpBson = BSON("ts" << Timestamp(1, 1) << "t" << 1LL << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rollback_test.test"
- << "ui"
- << commonOpUuid);
+ << "ui" << commonOpUuid);
auto commonOperation = std::make_pair(commonOpBson, RecordId(1));
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 11ff4960681..2ef523e34b0 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -83,13 +83,13 @@
namespace mongo {
-using std::shared_ptr;
-using std::unique_ptr;
using std::list;
using std::map;
+using std::pair;
using std::set;
+using std::shared_ptr;
using std::string;
-using std::pair;
+using std::unique_ptr;
namespace repl {
@@ -199,10 +199,10 @@ Status FixUpInfo::recordDropTargetInfo(const BSONElement& dropTarget,
OpTime opTime) {
StatusWith<UUID> dropTargetUUIDStatus = UUID::parse(dropTarget);
if (!dropTargetUUIDStatus.isOK()) {
- std::string message = str::stream() << "Unable to roll back renameCollection. Cannot parse "
- "dropTarget UUID. Returned status: "
- << redact(dropTargetUUIDStatus.getStatus())
- << ", oplog entry: " << redact(obj);
+ std::string message = str::stream()
+ << "Unable to roll back renameCollection. Cannot parse "
+ "dropTarget UUID. Returned status: "
+ << redact(dropTargetUUIDStatus.getStatus()) << ", oplog entry: " << redact(obj);
error() << message;
return dropTargetUUIDStatus.getStatus();
}
@@ -227,8 +227,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
// Checks that the oplog entry is smaller than 512 MB. We do not roll back if the
// oplog entry is larger than 512 MB.
if (ourObj.objsize() > 512 * 1024 * 1024)
- throw RSFatalException(str::stream() << "Rollback too large, oplog size: "
- << ourObj.objsize());
+ throw RSFatalException(str::stream()
+ << "Rollback too large, oplog size: " << ourObj.objsize());
// If required fields are not present in the BSONObj for an applyOps entry, create these fields
// and populate them with dummy values before parsing ourObj as an oplog entry.
@@ -1235,8 +1235,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// is rolled back upstream and we restart, we expect to still have the
// collection.
- log() << nss->ns() << " not found on remote host, so we do not roll back collmod "
- "operation. Instead, we will drop the collection soon.";
+ log() << nss->ns()
+ << " not found on remote host, so we do not roll back collmod "
+ "operation. Instead, we will drop the collection soon.";
continue;
}
@@ -1246,10 +1247,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// Updates the collection flags.
if (auto optionsField = info["options"]) {
if (optionsField.type() != Object) {
- throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": expected 'options' to be an "
- << "Object, got "
- << typeName(optionsField.type()));
+ throw RSFatalException(str::stream()
+ << "Failed to parse options " << info
+ << ": expected 'options' to be an "
+ << "Object, got " << typeName(optionsField.type()));
}
// Removes the option.uuid field. We do not allow the options.uuid field
@@ -1261,8 +1262,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto status = options.parse(optionsFieldObj, CollectionOptions::parseForCommand);
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": "
- << status.toString());
+ << ": " << status.toString());
}
// TODO(SERVER-27992): Set options.uuid.
@@ -1281,13 +1281,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto validatorStatus = collection->updateValidator(
opCtx, options.validator, options.validationLevel, options.validationAction);
if (!validatorStatus.isOK()) {
- throw RSFatalException(
- str::stream() << "Failed to update validator for " << nss->toString() << " ("
- << uuid
- << ") with "
- << redact(info)
- << ". Got: "
- << validatorStatus.toString());
+ throw RSFatalException(str::stream()
+ << "Failed to update validator for " << nss->toString()
+ << " (" << uuid << ") with " << redact(info)
+ << ". Got: " << validatorStatus.toString());
}
wuow.commit();
@@ -1377,8 +1374,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
<< " to archive file: " << redact(status);
throw RSFatalException(str::stream()
<< "Rollback cannot write document in namespace "
- << nss->ns()
- << " to archive file.");
+ << nss->ns() << " to archive file.");
}
} else {
error() << "Rollback cannot find object: " << pattern << " in namespace "
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index bfacdd849cb..991d0851afc 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -81,21 +81,16 @@ OplogInterfaceMock::Operation makeDropIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName
+ << "v" << static_cast<int>(kIndexVersion));
return std::make_pair(
BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
- << "ui"
- << collection->uuid().get()
- << "ns"
+ << "ui" << collection->uuid().get() << "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
- << "o2"
- << indexSpec),
+ << "o" << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
+ << "o2" << indexSpec),
RecordId(time));
}
@@ -103,22 +98,15 @@ OplogInterfaceMock::Operation makeCreateIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << key
- << "name"
- << indexName);
+ auto indexSpec = BSON(
+ "createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
+ << static_cast<int>(kIndexVersion) << "key" << key << "name" << indexName);
return std::make_pair(BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "ui"
- << collection->uuid().get()
- << "o"
- << indexSpec),
+ << "ui" << collection->uuid().get() << "o" << indexSpec),
RecordId(time));
}
@@ -140,11 +128,7 @@ OplogInterfaceMock::Operation makeRenameCollectionOplogEntry(const NamespaceStri
}
return std::make_pair(BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm() << "op"
<< "c"
- << "ui"
- << collectionUUID
- << "ns"
- << renameFrom.ns()
- << "o"
+ << "ui" << collectionUUID << "ns" << renameFrom.ns() << "o"
<< obj),
RecordId(opTime.getTimestamp().getSecs()));
}
@@ -153,12 +137,9 @@ BSONObj makeOp(long long seconds) {
auto uuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << seconds << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rs_rollback.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
int recordId = 0;
@@ -294,12 +275,9 @@ int _testRollbackDelete(OperationContext* opCtx,
auto commonOperation = makeOpAndRecordId(1);
auto deleteOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "d"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0)),
+ << "o" << BSON("_id" << 0)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -423,12 +401,9 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
auto commonOperation = makeOpAndRecordId(1);
auto insertDocumentOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("a" << 1)),
+ << "o" << BSON("a" << 1)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -467,8 +442,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
NamespaceString nss("test", "coll");
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("a" << 1)
- << "name"
+ << BSON("a" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -492,13 +466,11 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
_coordinator,
_replicationProcess.get()));
stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ ASSERT_EQUALS(
+ 1,
+ countLogLinesContaining(str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
{
Lock::DBLock dbLock(_opCtx.get(), nss.db(), MODE_S);
auto indexCatalog = collection->getIndexCatalog();
@@ -514,9 +486,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
auto collection = _createCollection(_opCtx.get(), "test.t", options);
auto indexSpec = BSON("ns"
<< "test.t"
- << "key"
- << BSON("a" << 1)
- << "name"
+ << "key" << BSON("a" << 1) << "name"
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
@@ -665,9 +635,7 @@ TEST_F(RSRollbackTest, RollingBackCreateIndexAndRenameWithLongName) {
auto longName = std::string(115, 'a');
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
- << longName);
+ << BSON("b" << 1) << "name" << longName);
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -720,8 +688,7 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
+ << BSON("b" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -752,19 +719,15 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
ASSERT(indexCatalog);
ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_opCtx.get()));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Created index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Created index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
std::vector<const IndexDescriptor*> indexes;
indexCatalog->findIndexesByKeyPattern(_opCtx.get(), BSON("a" << 1), false, &indexes);
ASSERT(indexes.size() == 1);
@@ -786,20 +749,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "t"
<< "ns"
<< "test.t"
- << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("a" << 1));
-
- auto createIndexOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
- << "c"
- << "ns"
- << "test.$cmd"
- << "ui"
- << collection->uuid().get()
- << "o"
- << command),
- RecordId(2));
+ << "v" << static_cast<int>(kIndexVersion) << "key" << BSON("a" << 1));
+
+ auto createIndexOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
+ << "c"
+ << "ns"
+ << "test.$cmd"
+ << "ui" << collection->uuid().get() << "o" << command),
+ RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
@@ -829,9 +787,7 @@ std::string idxName(std::string id) {
// Create an index spec object given the namespace and the index 'id'.
BSONObj idxSpec(NamespaceString nss, std::string id) {
return BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey(id) << 1)
- << "name"
- << idxName(id));
+ << BSON(idxKey(id) << 1) << "name" << idxName(id));
}
// Returns the number of indexes that exist on the given collection.
@@ -954,9 +910,7 @@ TEST_F(RSRollbackTest, RollbackCreateDropRecreateIndexOnCollection) {
// Create the necessary indexes. Index 0 is created, dropped, and created again in the
// sequence of ops, so we create that index.
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey("0") << 1)
- << "name"
- << idxName("0"));
+ << BSON(idxKey("0") << 1) << "name" << idxName("0"));
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), coll, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -991,9 +945,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto unknownCommandOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
<< "o"
<< BSON("convertToCapped"
@@ -1027,9 +979,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("drop"
@@ -1351,9 +1301,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionThenRenameCollectionToDroppedCollec
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << droppedCollectionUUID
- << "ns"
+ << "ui" << droppedCollectionUUID << "ns"
<< "test.x"
<< "o"
<< BSON("drop"
@@ -1423,16 +1371,15 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionThenCreateNewCollectionWithOldNam
false,
OpTime(Timestamp(2, 0), 5));
- auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
- << "c"
- << "ui"
- << createdCollectionUUID
- << "ns"
- << "test.x"
- << "o"
- << BSON("create"
- << "x")),
- RecordId(3));
+ auto createCollectionOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
+ << "c"
+ << "ui" << createdCollectionUUID << "ns"
+ << "test.x"
+ << "o"
+ << BSON("create"
+ << "x")),
+ RecordId(3));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
@@ -1473,9 +1420,7 @@ TEST_F(RSRollbackTest, RollbackCollModCommandFailsIfRBIDChangesWhileSyncingColle
auto commonOperation = makeOpAndRecordId(1);
auto collModOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("collMod"
@@ -1519,8 +1464,7 @@ TEST_F(RSRollbackTest, RollbackDropDatabaseCommand) {
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropDatabase" << 1)),
+ << "o" << BSON("dropDatabase" << 1)),
RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -1588,93 +1532,47 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
UUID uuid = coll->uuid().get();
const auto commonOperation = makeOpAndRecordId(1);
const auto applyOpsOperation =
- std::make_pair(makeApplyOpsOplogEntry(Timestamp(Seconds(2), 0),
- {BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 1)
- << "o"
- << BSON("_id" << 1 << "v" << 2)),
- BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(2, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 2)
- << "o"
- << BSON("_id" << 2 << "v" << 4)),
- BSON("op"
- << "d"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(3, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 3)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- // applyOps internal oplog entries are not required
- // to have a timestamp.
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4))}),
+ std::make_pair(makeApplyOpsOplogEntry(
+ Timestamp(Seconds(2), 0),
+ {BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(1, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 1) << "o"
+ << BSON("_id" << 1 << "v" << 2)),
+ BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(2, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 2) << "o"
+ << BSON("_id" << 2 << "v" << 4)),
+ BSON("op"
+ << "d"
+ << "ui" << uuid << "ts" << Timestamp(3, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 3)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ // applyOps internal oplog entries are not required
+ // to have a timestamp.
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4))}),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
@@ -1742,9 +1640,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("create"
@@ -1972,31 +1868,19 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< ""
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -2005,31 +1889,19 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSONObj());
+ << "o" << BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -2038,16 +1910,10 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion") {
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false)
@@ -2057,34 +1923,20 @@ DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion"
TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
const auto validOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSON("_id" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
@@ -2094,12 +1946,9 @@ TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutSessionIdIsFatal, "invariant") {
auto validOplogEntry = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
@@ -2120,18 +1969,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutTxnTableUUIDIsFatal) {
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
FixUpInfo fui;
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
@@ -2145,12 +1986,9 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
// With no txnNumber present, no extra documents need to be refetched.
auto entryWithoutTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t2"
- << "o"
- << BSON("_id" << 2 << "a" << 2));
+ << "o" << BSON("_id" << 2 << "a" << 2));
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, entryWithoutTxnNumber, false));
@@ -2163,18 +2001,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2204,20 +2034,11 @@ TEST_F(RSRollbackTest, LocalEntryWithPartialTxnAddsTransactionTableDocToBeRefetc
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2240,15 +2061,8 @@ TEST_F(RSRollbackTest, LocalAbortTxnRefetchesTransactionTableEntry) {
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL));
UUID transactionTableUUID = UUID::gen();
@@ -2276,15 +2090,8 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto entryWithTxnNumber =
@@ -2295,20 +2102,11 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2335,21 +2133,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "count"
- << 2)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "count" << 2)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(2));
@@ -2361,21 +2149,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto partialTxnOperation = std::make_pair(partialTxnEntry, RecordId(1));
@@ -2428,21 +2206,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "txnNumber" << 1LL << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2454,21 +2222,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2480,21 +2238,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto operationBeforeCommonPoint = std::make_pair(entryBeforeCommonPoint, RecordId(9));
@@ -2572,19 +2320,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2596,21 +2336,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2653,20 +2383,13 @@ TEST_F(RSRollbackTest, RollbackFailsIfTransactionDocumentRefetchReturnsDifferent
// transaction number and session id.
FixUpInfo fui;
- auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
- << "i"
- << "ui"
- << UUID::gen()
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << makeLogicalSessionIdForTest().toBSON());
+ auto entryWithTxnNumber =
+ BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
+ << "i"
+ << "ui" << UUID::gen() << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL << "stmtId" << 1
+ << "lsid" << makeLogicalSessionIdForTest().toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
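
Every hunk above is the same mechanical reflow: the newer clang-format packs `<<` chains and only breaks before a string literal, so key/value pairs like `"ui" << uuid` now share a line. A minimal sketch of an oplog-entry literal in the post-format style, assuming MongoDB's `BSON` macro and `UUID::gen()` (illustrative only, not part of this patch; include paths approximate):

    #include "mongo/bson/bsonobjbuilder.h"  // BSON macro
    #include "mongo/util/uuid.h"            // UUID::gen()

    mongo::BSONObj makeInsertEntry() {
        using namespace mongo;
        return BSON("ts" << Timestamp(1, 0) << "t" << 1LL << "op"
                         << "i"  // a string literal still forces the line break
                         << "ui" << UUID::gen() << "ns"
                         << "test.t"
                         << "o" << BSON("_id" << 1 << "a" << 1));
    }
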
diff --git a/src/mongo/db/repl/split_horizon_test.cpp b/src/mongo/db/repl/split_horizon_test.cpp
index 0a3a655ccaf..95b2df2ad36 100644
--- a/src/mongo/db/repl/split_horizon_test.cpp
+++ b/src/mongo/db/repl/split_horizon_test.cpp
@@ -300,8 +300,7 @@ TEST(SplitHorizonTesting, BSONConstruction) {
// Two horizons with duplicate host and ports.
{BSON("horizonWithDuplicateHost1" << matchingHostAndPort << "horizonWithDuplicateHost2"
- << matchingHostAndPort
- << "uniqueHorizon"
+ << matchingHostAndPort << "uniqueHorizon"
<< nonmatchingHost),
defaultHostAndPort,
{},
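
The BSONConstruction case builds one object whose horizon fields intentionally map two names to the same host; the reflow only repacks the `<<` chain. A sketch of the shape, with hypothetical names and plain strings standing in for the test's host-and-port values:

    mongo::BSONObj makeHorizons(const std::string& sharedHost, const std::string& uniqueHost) {
        using namespace mongo;
        return BSON("horizonWithDuplicateHost1" << sharedHost << "horizonWithDuplicateHost2"
                                                << sharedHost << "uniqueHorizon" << uniqueHost);
    }
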
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 39d5c73b6e4..19e7c8840fa 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -501,20 +501,16 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx,
if (fromNS.db() != toNS.db()) {
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "Cannot rename collection between databases. From NS: "
- << fromNS.ns()
- << "; to NS: "
- << toNS.ns());
+ << fromNS.ns() << "; to NS: " << toNS.ns());
}
return writeConflictRetry(opCtx, "StorageInterfaceImpl::renameCollection", fromNS.ns(), [&] {
AutoGetDb autoDB(opCtx, fromNS.db(), MODE_X);
if (!autoDB.getDb()) {
return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "Cannot rename collection from " << fromNS.ns() << " to "
- << toNS.ns()
- << ". Database "
- << fromNS.db()
- << " not found.");
+ str::stream()
+ << "Cannot rename collection from " << fromNS.ns() << " to "
+ << toNS.ns() << ". Database " << fromNS.db() << " not found.");
}
WriteUnitOfWork wunit(opCtx);
const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp);
@@ -557,8 +553,7 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx,
if (!idx) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "Could not find index " << indexName << " in "
- << nss.ns()
- << " to set to multikey.");
+ << nss.ns() << " to set to multikey.");
}
collection->getIndexCatalog()->setMultikeyPaths(opCtx, idx, paths);
wunit.commit();
@@ -646,16 +641,13 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
if (!indexDescriptor) {
return Result(ErrorCodes::IndexNotFound,
str::stream() << "Index not found, ns:" << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << ", index: " << *indexName);
}
if (indexDescriptor->isPartial()) {
return Result(ErrorCodes::IndexOptionsConflict,
str::stream()
<< "Partial index is not allowed for this operation, ns:"
- << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << nsOrUUID.toString() << ", index: " << *indexName);
}
KeyPattern keyPattern(indexDescriptor->keyPattern());
@@ -855,11 +847,11 @@ Status _updateWithQuery(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to update documents in " << nss.ns() << " using query "
- << request.getQuery());
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to update documents in " << nss.ns()
+ << " using query " << request.getQuery());
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
@@ -988,11 +980,11 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to delete documents in " << nss.ns() << " using filter "
- << filter);
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to delete documents in " << nss.ns()
+ << " using filter " << filter);
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
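
A second recurring shape in this file: when the message is long, clang-format now drops `str::stream()` onto its own line so the whole `<<` chain can pack underneath it. A sketch of that pattern, assuming mongo's `Status`, `NamespaceString`, and `str::stream()` (header paths approximate):

    #include "mongo/base/status.h"
    #include "mongo/db/namespace_string.h"
    #include "mongo/util/str.h"  // str::stream

    mongo::Status checkSameDb(const mongo::NamespaceString& fromNS,
                              const mongo::NamespaceString& toNS) {
        using namespace mongo;
        if (fromNS.db() != toNS.db()) {
            return Status(ErrorCodes::InvalidNamespace,
                          str::stream() << "Cannot rename collection between databases. From NS: "
                                        << fromNS.ns() << "; to NS: " << toNS.ns());
        }
        return Status::OK();
    }
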
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 19d57312b19..8ade17d6efa 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -69,11 +69,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
BSONObj makeIdIndexSpec(const NamespaceString& nss) {
return BSON("ns" << nss.toString() << "name"
<< "_id_"
- << "key"
- << BSON("_id" << 1)
- << "unique"
- << true
- << "v"
+ << "key" << BSON("_id" << 1) << "unique" << true << "v"
<< static_cast<int>(kIndexVersion));
}
@@ -299,8 +295,7 @@ void _assertRollbackIDDocument(OperationContext* opCtx, int id) {
opCtx,
NamespaceString(StorageInterfaceImpl::kDefaultRollbackIdNamespace),
{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << id)});
+ << StorageInterfaceImpl::kRollbackIdFieldName << id)});
}
TEST_F(StorageInterfaceImplTest, RollbackIdInitializesIncrementsAndReadsProperly) {
@@ -380,8 +375,7 @@ TEST_F(StorageInterfaceImplTest, GetRollbackIDReturnsBadStatusIfRollbackIDIsNotI
std::vector<TimestampedBSONObj> badDoc = {
TimestampedBSONObj{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << "bad id"),
+ << StorageInterfaceImpl::kRollbackIdFieldName << "bad id"),
Timestamp::min()}};
ASSERT_OK(storage.insertDocuments(opCtx, nss, transformInserts(badDoc)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, storage.getRollbackID(opCtx).getStatus());
@@ -625,8 +619,7 @@ TEST_F(StorageInterfaceImplTest, DestroyingUncommittedCollectionBulkLoaderDropsI
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' by letting it go out of scope.
};
@@ -650,8 +643,7 @@ TEST_F(StorageInterfaceImplTest,
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' in a new thread that does not have a Client.
stdx::thread([&loader]() { loader.reset(); }).join();
@@ -914,9 +906,7 @@ TEST_F(StorageInterfaceImplTest, FindDocumentsReturnsIndexOptionsConflictIfIndex
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns()
- << "partialFilterExpression"
+ << "ns" << nss.ns() << "partialFilterExpression"
<< BSON("y" << 1))};
auto loader = unittest::assertGet(storage.createCollectionForBulkLoading(
nss, generateOptionsWithUuid(), makeIdIndexSpec(nss), indexes));
@@ -975,8 +965,8 @@ void _assertDocumentsEqual(const StatusWith<std::vector<BSONObj>>& statusWithDoc
const std::vector<BSONObj>& expectedDocs) {
const auto actualDocs = unittest::assertGet(statusWithDocs);
auto iter = actualDocs.cbegin();
- std::string msg = str::stream() << "expected: " << _toString(expectedDocs)
- << "; actual: " << _toString(actualDocs);
+ std::string msg = str::stream()
+ << "expected: " << _toString(expectedDocs) << "; actual: " << _toString(actualDocs);
for (const auto& doc : expectedDocs) {
ASSERT_TRUE(iter != actualDocs.cend()) << msg;
ASSERT_BSONOBJ_EQ(doc, *(iter++));
@@ -2264,9 +2254,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatab
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
ASSERT_EQUALS(std::string(str::stream()
<< "Database [nosuchdb] not found. Unable to delete documents in "
- << nss.ns()
- << " using filter "
- << filter),
+ << nss.ns() << " using filter " << filter),
status.reason());
}
@@ -2362,9 +2350,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle
ASSERT_EQUALS(std::string(
str::stream()
<< "Collection [mydb.wrongColl] not found. Unable to delete documents in "
- << wrongColl.ns()
- << " using filter "
- << filter),
+ << wrongColl.ns() << " using filter " << filter),
status.reason());
}
@@ -2484,8 +2470,7 @@ TEST_F(StorageInterfaceImplTest,
CollectionOptions options = generateOptionsWithUuid();
options.collation = BSON("locale"
<< "en_US"
- << "strength"
- << 2);
+ << "strength" << 2);
ASSERT_OK(storage.createCollection(opCtx, nss, options));
auto doc1 = BSON("_id" << 1 << "x"
@@ -2660,9 +2645,8 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeySucceeds) {
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
auto indexName = "a_b_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_EQUALS(_createIndexOnEmptyCollection(opCtx, nss, indexSpec), 2);
MultikeyPaths paths = {{1}};
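
The index-spec literals here all collapse the same way: booleans and ints join the preceding key while string values keep their own line. A sketch of a v2 `_id` index spec in the new layout (mirrors the `makeIdIndexSpec` hunk above; the version constant is assumed to be `IndexDescriptor::IndexVersion::kV2`):

    mongo::BSONObj makeIdIndexSpecSketch(const mongo::NamespaceString& nss) {
        using namespace mongo;
        return BSON("ns" << nss.toString() << "name"
                         << "_id_"
                         << "key" << BSON("_id" << 1) << "unique" << true << "v"
                         << 2 /* static_cast<int>(IndexDescriptor::IndexVersion::kV2) */);
    }
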
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index 119f682fba9..cc031904cb8 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -352,8 +352,8 @@ public:
[](const NamespaceString& nss,
const CollectionOptions& options,
const BSONObj idIndexSpec,
- const std::vector<BSONObj>&
- secondaryIndexSpecs) -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
+ const std::vector<BSONObj>& secondaryIndexSpecs)
+ -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
return Status{ErrorCodes::IllegalOperation, "CreateCollectionForBulkFn not implemented."};
};
InsertDocumentFn insertDocumentFn = [](OperationContext* opCtx,
@@ -404,8 +404,9 @@ public:
IsAdminDbValidFn isAdminDbValidFn = [](OperationContext*) {
return Status{ErrorCodes::IllegalOperation, "IsAdminDbValidFn not implemented."};
};
- GetCollectionUUIDFn getCollectionUUIDFn = [](
- OperationContext* opCtx, const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
+ GetCollectionUUIDFn getCollectionUUIDFn =
+ [](OperationContext* opCtx,
+ const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
return Status{ErrorCodes::IllegalOperation, "GetCollectionUUIDFn not implemented."};
};
UpgradeNonReplicatedUniqueIndexesFn upgradeNonReplicatedUniqueIndexesFn =
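
For lambdas with trailing return types, the format now keeps the parameter list together and breaks before `->`. A sketch of the defaulted "not implemented" hook pattern, with an illustrative alias and mongo's `StatusWith` assumed:

    #include <functional>
    #include <string>
    #include "mongo/base/status_with.h"

    // Hypothetical alias mirroring the mock's function members.
    using GetUUIDFn = std::function<mongo::StatusWith<int>(const std::string&)>;

    GetUUIDFn getUUIDFn = [](const std::string& name) -> mongo::StatusWith<int> {
        return mongo::Status{mongo::ErrorCodes::IllegalOperation, "GetUUIDFn not implemented."};
    };
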
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index adfcc7b2f31..af82a940d35 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -74,8 +74,7 @@ SyncSourceResolver::SyncSourceResolver(executor::TaskExecutor* taskExecutor,
str::stream() << "required optime (if provided) must be more recent than last "
"fetched optime. requiredOpTime: "
<< requiredOpTime.toString()
- << ", lastOpTimeFetched: "
- << lastOpTimeFetched.toString(),
+ << ", lastOpTimeFetched: " << lastOpTimeFetched.toString(),
requiredOpTime.isNull() || requiredOpTime > lastOpTimeFetched);
uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
}
@@ -171,9 +170,8 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
kLocalOplogNss.db().toString(),
BSON("find" << kLocalOplogNss.coll() << "limit" << 1 << "sort" << BSON("$natural" << 1)
<< "projection"
- << BSON(OplogEntryBase::kTimestampFieldName << 1
- << OplogEntryBase::kTermFieldName
- << 1)),
+ << BSON(OplogEntryBase::kTimestampFieldName
+ << 1 << OplogEntryBase::kTermFieldName << 1)),
[=](const StatusWith<Fetcher::QueryResponse>& response,
Fetcher::NextAction*,
BSONObjBuilder*) {
@@ -413,12 +411,11 @@ Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
const auto opTime = oplogEntry.getOpTime();
if (_requiredOpTime != opTime) {
return Status(ErrorCodes::BadValue,
- str::stream() << "remote oplog contain entry with matching timestamp "
- << opTime.getTimestamp().toString()
- << " but optime "
- << opTime.toString()
- << " does not "
- "match our required optime");
+ str::stream()
+                          << "remote oplog contains an entry with matching timestamp "
+ << opTime.getTimestamp().toString() << " but optime " << opTime.toString()
+ << " does not "
+ "match our required optime");
}
if (_requiredOpTime.getTerm() != opTime.getTerm()) {
return Status(ErrorCodes::BadValue,
@@ -439,8 +436,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
str::stream() << "sync source resolver shut down while looking for "
"required optime "
<< _requiredOpTime.toString()
- << " in candidate's oplog: "
- << candidate))
+ << " in candidate's oplog: " << candidate))
.transitional_ignore();
return;
}
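
The reflowed find command shows the same literal-breaking rule inside nested `BSON` calls. A sketch of the first-oplog-entry query in its new shape; the plain field names stand in for the `OplogEntryBase::k...FieldName` constants used in the real code:

    mongo::BSONObj makeFirstEntryFindCmd() {
        using namespace mongo;
        return BSON("find"
                    << "oplog.rs"  // kLocalOplogNss.coll() in the source
                    << "limit" << 1 << "sort" << BSON("$natural" << 1) << "projection"
                    << BSON("ts" << 1 << "t" << 1));
    }
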
diff --git a/src/mongo/db/repl/sync_source_selector.h b/src/mongo/db/repl/sync_source_selector.h
index 0a620d691a2..c21a5e82a14 100644
--- a/src/mongo/db/repl/sync_source_selector.h
+++ b/src/mongo/db/repl/sync_source_selector.h
@@ -41,7 +41,7 @@ class Timestamp;
namespace rpc {
class ReplSetMetadata;
class OplogQueryMetadata;
-}
+} // namespace rpc
namespace repl {
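
This hunk is clang-format's FixNamespaceComments option at work: closing braces of namespaces gain a trailing comment. The whole change, in miniature:

    namespace rpc {
    class ReplSetMetadata;
    class OplogQueryMetadata;
    }  // namespace rpc
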
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index a21c1162829..4e7f6553e11 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -786,8 +786,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
str::stream() << "Attempted to apply an oplog entry ("
<< firstOpTimeInBatch.toString()
<< ") which is not greater than our last applied OpTime ("
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << ")."));
+ << lastAppliedOpTimeAtStartOfBatch.toString() << ")."));
}
// Don't allow the fsync+lock thread to see intermediate states of batch application.
@@ -817,8 +816,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
const auto lastAppliedOpTimeAtEndOfBatch = replCoord->getMyLastAppliedOpTime();
invariant(lastAppliedOpTimeAtStartOfBatch == lastAppliedOpTimeAtEndOfBatch,
str::stream() << "the last known applied OpTime has changed from "
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << " to "
+ << lastAppliedOpTimeAtStartOfBatch.toString() << " to "
<< lastAppliedOpTimeAtEndOfBatch.toString()
<< " in the middle of batch application");
@@ -1299,23 +1297,23 @@ void SyncTail::_applyOps(std::vector<MultiApplier::OperationPtrs>& writerVectors
if (writerVectors[i].empty())
continue;
- _writerPool->schedule([
- this,
- &writer = writerVectors.at(i),
- &status = statusVector->at(i),
- &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)
- ](auto scheduleStatus) {
- invariant(scheduleStatus);
+ _writerPool->schedule(
+ [this,
+ &writer = writerVectors.at(i),
+ &status = statusVector->at(i),
+ &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)](auto scheduleStatus) {
+ invariant(scheduleStatus);
- auto opCtx = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- // This code path is only executed on secondaries and initial syncing nodes, so it is
- // safe to exclude any writes from Flow Control.
- opCtx->setShouldParticipateInFlowControl(false);
+ // This code path is only executed on secondaries and initial syncing nodes, so it
+ // is safe to exclude any writes from Flow Control.
+ opCtx->setShouldParticipateInFlowControl(false);
- status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown(
- [&] { return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo); });
- });
+ status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown([&] {
+ return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo);
+ });
+ });
}
}
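
The `_applyOps` hunk is the most involved reflow: the init-capture list now wraps as one unit and the body re-indents under `schedule(`. A standalone sketch of the underlying C++14 pattern, binding references to per-worker slots at capture time (all names are illustrative, and a plain vector of closures stands in for the writer pool):

    #include <cstddef>
    #include <functional>
    #include <vector>

    void scheduleAll(std::vector<std::function<void()>>& pool,  // stand-in for _writerPool
                     std::vector<int>& writers,
                     std::vector<int>& statuses) {
        for (std::size_t i = 0; i < writers.size(); ++i) {
            pool.emplace_back(
                [&writer = writers.at(i), &status = statuses.at(i)]() {
                    status = writer;  // each task touches only its own bound slots
                });
        }
    }
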
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 33558e09a39..b5aeb361244 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -355,14 +355,8 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
NamespaceString nss("test.t");
auto op = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())
- << "ts"
- << Timestamp(1, 1)
- << "ui"
- << UUID::gen());
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()) << "ts"
+ << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
Collection*,
@@ -387,13 +381,10 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
const BSONObj op = BSON("op"
<< "c"
- << "ns"
- << 12345
- << "o"
+ << "ns" << 12345 << "o"
<< BSON("create"
<< "t")
- << "ts"
- << Timestamp(1, 1));
+ << "ts" << Timestamp(1, 1));
// This test relies on the namespace type check of IDL.
ASSERT_THROWS(
SyncTail::syncApply(_opCtx.get(), op, OplogApplication::Mode::kInitialSync, boost::none),
@@ -493,14 +484,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -510,14 +496,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(1),
@@ -527,11 +508,7 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))),
_lsid,
_txnNum,
@@ -683,14 +660,10 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionTwoBatches) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << (i == 1 ? _nss2.ns() : _nss1.ns())
- << "ui"
- << (i == 1 ? *_uuid2 : *_uuid1)
- << "o"
+ << "ns" << (i == 1 ? _nss2.ns() : _nss1.ns()) << "ui"
+ << (i == 1 ? *_uuid2 : *_uuid1) << "o"
<< insertDocs.back()))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(i),
@@ -757,14 +730,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
StmtId(0),
@@ -774,14 +742,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
@@ -792,14 +755,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 3)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(0),
@@ -809,14 +767,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 4)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(1),
@@ -877,14 +830,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(2),
@@ -894,14 +842,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 0)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -2190,28 +2133,18 @@ TEST_F(IdempotencyTest, CreateCollectionWithCollation) {
auto insertOp2 = insert(fromjson("{ _id: 'Foo', x: 1 }"));
auto updateOp = update("foo", BSON("$set" << BSON("x" << 2)));
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
- auto options = BSON("collation" << BSON("locale"
- << "en"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "57.1")
- << "uuid"
- << uuid);
+ auto options = BSON("collation"
+ << BSON("locale"
+ << "en"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "57.1")
+ << "uuid" << uuid);
auto createColl = makeCreateCollectionOplogEntry(nextOpTime(), nss, options);
// We don't drop and re-create the collection since we don't have ways
@@ -2235,12 +2168,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithIdIndex) {
auto options1 = BSON("idIndex" << BSON("key" << fromjson("{_id: 1}") << "name"
<< "_id_"
- << "v"
- << 2
- << "ns"
- << nss.ns())
- << "uuid"
- << uuid);
+ << "v" << 2 << "ns" << nss.ns())
+ << "uuid" << uuid);
auto createColl1 = makeCreateCollectionOplogEntry(nextOpTime(), nss, options1);
ASSERT_OK(runOpInitialSync(createColl1));
@@ -2274,9 +2203,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithView) {
ASSERT_OK(
runOpInitialSync(makeCreateCollectionOplogEntry(nextOpTime(), viewNss, options.toBSON())));
- auto viewDoc =
- BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll() << "pipeline"
- << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
+ auto viewDoc = BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll()
+ << "pipeline" << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
auto insertViewOp = makeInsertDocumentOplogEntry(nextOpTime(), viewNss, viewDoc);
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
@@ -2698,14 +2626,9 @@ TEST_F(SyncTailTxnTableTest, RetryableWriteThenMultiStatementTxnWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << *uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << *uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
@@ -2754,14 +2677,9 @@ TEST_F(SyncTailTxnTableTest, MultiStatementTxnWriteThenRetryableWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << *uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << *uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
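
Across these tests the multi-statement-transaction entries compress identically: an `applyOps` array entry plus a `partialTxn: true` (or `prepare: true`) marker. A sketch of one such payload with placeholder values:

    mongo::BSONObj makePartialTxnOp(const mongo::NamespaceString& nss, const mongo::UUID& uuid) {
        using namespace mongo;
        return BSON("applyOps" << BSON_ARRAY(BSON("op"
                                                  << "i"
                                                  << "ns" << nss.ns() << "ui" << uuid << "o"
                                                  << BSON("_id" << 2)))
                               << "partialTxn" << true);
    }
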
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index a79cdfa7faa..4c53b558aa1 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -182,7 +182,6 @@ void TaskRunner::_runTasks() {
"this task has been canceled by a previously invoked task"));
}
tasks.clear();
-
};
cancelTasks();
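
Here the patch only deletes a stray blank line ahead of the lambda's closing `};`. The surviving shape, sketched (the capture list is illustrative):

    auto cancelTasks = [&tasks]() {
        // ...mark the remaining tasks as canceled...
        tasks.clear();
    };
    cancelTasks();
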
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 43b0a7a8605..81056086087 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -249,8 +249,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
_syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
_forceSyncSourceIndex = -1;
log() << "choosing sync source candidate by request: " << _syncSource;
- std::string msg(str::stream() << "syncing from: " << _syncSource.toString()
- << " by request");
+ std::string msg(str::stream()
+ << "syncing from: " << _syncSource.toString() << " by request");
setMyHeartbeatMessage(now, msg);
return _syncSource;
}
@@ -572,8 +572,7 @@ Status TopologyCoordinator::prepareHeartbeatResponseV1(Date_t now,
<< "; remote node's: " << rshb;
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb
- << " reported by remote node");
+ << rshb << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -782,8 +781,9 @@ HeartbeatResponseAction TopologyCoordinator::processHeartbeatResponse(
}
const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
- LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: "
+ LOG(1) << "Could not find " << target
+ << " in current config so ignoring --"
+ " current config: "
<< _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
@@ -1131,8 +1131,9 @@ HeartbeatResponseAction TopologyCoordinator::_updatePrimaryFromHBDataV1(
bool scheduleCatchupTakeover = false;
bool schedulePriorityTakeover = false;
- if (!catchupTakeoverDisabled && (_memberData.at(primaryIndex).getLastAppliedOpTime() <
- _memberData.at(_selfIndex).getLastAppliedOpTime())) {
+ if (!catchupTakeoverDisabled &&
+ (_memberData.at(primaryIndex).getLastAppliedOpTime() <
+ _memberData.at(_selfIndex).getLastAppliedOpTime())) {
LOG_FOR_ELECTION(2) << "I can take over the primary due to fresher data."
<< " Current primary index: " << primaryIndex << " in term "
<< _memberData.at(primaryIndex).getTerm() << "."
@@ -2711,38 +2712,30 @@ void TopologyCoordinator::processReplSetRequestVotes(const ReplSetRequestVotesAr
if (args.getTerm() < _term) {
response->setVoteGranted(false);
response->setReason(str::stream() << "candidate's term (" << args.getTerm()
- << ") is lower than mine ("
- << _term
- << ")");
+ << ") is lower than mine (" << _term << ")");
} else if (args.getConfigVersion() != _rsConfig.getConfigVersion()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's config version ("
- << args.getConfigVersion()
- << ") differs from mine ("
- << _rsConfig.getConfigVersion()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's config version (" << args.getConfigVersion()
+ << ") differs from mine (" << _rsConfig.getConfigVersion() << ")");
} else if (args.getSetName() != _rsConfig.getReplSetName()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's set name (" << args.getSetName()
- << ") differs from mine ("
- << _rsConfig.getReplSetName()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's set name (" << args.getSetName()
+ << ") differs from mine (" << _rsConfig.getReplSetName() << ")");
} else if (args.getLastDurableOpTime() < getMyLastAppliedOpTime()) {
response->setVoteGranted(false);
response
->setReason(str::stream()
<< "candidate's data is staler than mine. candidate's last applied OpTime: "
<< args.getLastDurableOpTime().toString()
- << ", my last applied OpTime: "
- << getMyLastAppliedOpTime().toString());
+ << ", my last applied OpTime: " << getMyLastAppliedOpTime().toString());
} else if (!args.isADryRun() && _lastVote.getTerm() == args.getTerm()) {
response->setVoteGranted(false);
response->setReason(str::stream()
<< "already voted for another candidate ("
<< _rsConfig.getMemberAt(_lastVote.getCandidateIndex()).getHostAndPort()
- << ") this term ("
- << _lastVote.getTerm()
- << ")");
+ << ") this term (" << _lastVote.getTerm() << ")");
} else {
int betterPrimary = _findHealthyPrimaryOfEqualOrGreaterPriority(args.getCandidateIndex());
if (_selfConfig().isArbiter() && betterPrimary >= 0) {
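
The vote-processing chain repeats one pattern: deny the vote, then build the reason with `str::stream()` starting on its own line. A sketch of a single branch, with mongo's response type assumed and the compared terms passed in as placeholders:

    void denyForLowerTerm(mongo::ReplSetRequestVotesResponse* response,
                          long long candidateTerm,
                          long long myTerm) {
        response->setVoteGranted(false);
        response->setReason(mongo::str::stream() << "candidate's term (" << candidateTerm
                                                 << ") is lower than mine (" << myTerm << ")");
    }
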
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index b07ef7deb3f..6a6a6af4652 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -1076,7 +1076,7 @@ public:
/**
* Gets the number of retries left for this heartbeat attempt. Invalid to call if the current
* state is 'UNINITIALIZED'.
- */
+ */
int retriesLeft() const {
return kMaxHeartbeatRetries - _numFailuresSinceLastStart;
}
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 73b2fd6bcf6..633c0220372 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -53,9 +53,9 @@
#define ASSERT_NO_ACTION(EXPRESSION) \
ASSERT_EQUALS(mongo::repl::HeartbeatResponseAction::NoAction, (EXPRESSION))
-using std::unique_ptr;
-using mongo::rpc::ReplSetMetadata;
using mongo::rpc::OplogQueryMetadata;
+using mongo::rpc::ReplSetMetadata;
+using std::unique_ptr;
namespace mongo {
namespace repl {
@@ -326,9 +326,7 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -399,44 +397,31 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
}
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
- updateConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself")
- << BSON("_id" << 10 << "host"
- << "h1")
- << BSON("_id" << 20 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 30 << "host"
- << "h3"
- << "hidden"
- << true
- << "priority"
- << 0
- << "votes"
- << 0)
- << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 50 << "host"
- << "h5"
- << "slaveDelay"
- << 1
- << "priority"
- << 0)
- << BSON("_id" << 60 << "host"
- << "h6")
- << BSON("_id" << 70 << "host"
- << "hprimary"))),
- 0);
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself")
+ << BSON("_id" << 10 << "host"
+ << "h1")
+ << BSON("_id" << 20 << "host"
+ << "h2"
+ << "buildIndexes" << false << "priority" << 0)
+ << BSON("_id" << 30 << "host"
+ << "h3"
+ << "hidden" << true << "priority" << 0 << "votes" << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 50 << "host"
+ << "h5"
+ << "slaveDelay" << 1 << "priority" << 0)
+ << BSON("_id" << 60 << "host"
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime lastOpTimeWeApplied = OpTime(Timestamp(100, 0), 0);
@@ -573,9 +558,7 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
@@ -625,10 +608,7 @@ TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -752,9 +732,7 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -809,9 +787,7 @@ TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -862,9 +838,7 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -917,10 +891,7 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -975,9 +946,7 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -1050,9 +1019,7 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
@@ -1086,13 +1053,10 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -1108,21 +1072,15 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1150,21 +1108,15 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1187,21 +1139,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1225,21 +1171,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1264,21 +1204,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1303,21 +1237,15 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1346,21 +1274,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1395,21 +1317,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1445,21 +1361,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1494,21 +1404,15 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1562,21 +1466,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1920,13 +1818,10 @@ TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter)
TEST_F(TopoCoordTest, PrepareStepDownAttemptFailsIfNotLeader) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
getTopoCoord().changeMemberState_forTest(MemberState::RS_SECONDARY);
Status expectedStatus(ErrorCodes::NotMaster, "This node is not a primary. ");
@@ -1940,17 +1835,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
}
@@ -1974,8 +1866,8 @@ TEST_F(PrepareHeartbeatResponseV1Test,
prepareHeartbeatResponseV1(args, &response, &result);
stopCapturingLogMessages();
ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
- ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
- << result.reason() << '"';
+ ASSERT(result.reason().find("repl set names do not match"))
+ << "Actual string was \"" << result.reason() << '"';
ASSERT_EQUALS(1,
countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
"node's: rs1"));
@@ -1988,15 +1880,12 @@ TEST_F(PrepareHeartbeatResponseV1Test,
// reconfig self out of set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
-1);
ReplSetHeartbeatArgsV1 args;
args.setSetName("rs0");
@@ -2192,9 +2081,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2212,9 +2099,7 @@ TEST_F(TopoCoordTest, DoNotBecomeCandidateWhenBecomingSecondaryInSingleNodeSetIf
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2242,15 +2127,10 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2264,9 +2144,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2280,15 +2158,10 @@ TEST_F(TopoCoordTest,
ReplSetConfig cfg;
ASSERT_OK(cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0)))));
+ << "priority" << 0)))));
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2302,9 +2175,7 @@ TEST_F(TopoCoordTest,
getTopoCoord().adjustMaintenanceCountBy(1);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2317,13 +2188,10 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
@@ -2342,9 +2210,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2357,9 +2223,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// reconfig to add to set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2377,9 +2241,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2393,9 +2255,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2411,9 +2271,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2430,9 +2288,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2448,11 +2304,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2469,13 +2321,10 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
// now lose primary due to loss of electability
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2490,9 +2339,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2511,9 +2358,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Add hosts
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2528,18 +2373,13 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Change priorities and tags
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 10)
+ << "priority" << 10)
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 5
- << "tags"
+ << "priority" << 5 << "tags"
<< BSON("dc"
<< "NA"
<< "rack"
@@ -2553,9 +2393,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -2569,9 +2407,7 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
// reconfig and stay secondary
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2586,13 +2422,10 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
@@ -2611,9 +2444,7 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2626,13 +2457,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
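
The .transitional_ignore() calls in these vote-request hunks are untouched by the reformat. ReplSetRequestVotesArgs::initialize() returns a Status, and transitional_ignore() explicitly discards that Status at call sites flagged during the codebase-wide audit of ignored statuses; only the wrapping of the BSON argument changes here. A minimal sketch of the pattern, assuming a requestBson document shaped like the ones above:

    ReplSetRequestVotesArgs args;
    args.initialize(requestBson)    // returns Status; parsing can fail
        .transitional_ignore();     // deliberately drop it, as these tests do
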
@@ -2645,13 +2471,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response2;
@@ -2666,9 +2487,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2682,14 +2501,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2704,14 +2517,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2726,14 +2533,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args3
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2748,14 +2549,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args4
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2770,9 +2565,7 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2786,14 +2579,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2808,14 +2595,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2830,9 +2611,7 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2846,13 +2625,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2865,9 +2639,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2881,13 +2653,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 0LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2900,9 +2667,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2920,13 +2685,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2940,9 +2700,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2957,13 +2715,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 3LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2973,8 +2726,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_FALSE(response.getVoteGranted());
}
@@ -2982,9 +2734,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3001,13 +2751,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3021,14 +2766,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3043,9 +2782,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3062,13 +2799,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3082,14 +2814,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 1LL << "configVersion" << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3104,9 +2830,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3123,13 +2847,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3142,14 +2861,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 0LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 0LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3164,9 +2877,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3183,13 +2894,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3203,14 +2909,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3225,9 +2925,7 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3244,13 +2942,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3264,14 +2957,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 3LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3282,8 +2969,7 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_EQUALS(1, response.getTerm());
ASSERT_FALSE(response.getVoteGranted());
@@ -3299,12 +2985,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3326,12 +3007,7 @@ TEST_F(TopoCoordTest, NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndC
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3352,18 +3028,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
}
@@ -3381,23 +3053,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 7
- << "members"
+ << "version" << 7 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))),
+ << "buildIndexes" << false << "priority" << 0))),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -3733,15 +3397,12 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3783,28 +3444,19 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
hb.initialize(BSON("ok" << 1 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 1
- << "state"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime"
+ << Date_t() + Seconds(100) << "v" << 1 << "state"
<< MemberState::RS_PRIMARY),
0,
/*requireWallTime*/ true)
@@ -3863,20 +3515,15 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleAPriorityTakeoverWhenElectableAndReceiveHeartbeatFromLowerPriorityPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3897,21 +3544,16 @@ TEST_F(HeartbeatResponseTestV1,
TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 2 << "host"
<< "host2:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3952,18 +3594,14 @@ TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover)
TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4009,18 +3647,14 @@ TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4068,18 +3702,14 @@ TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4124,18 +3754,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4185,19 +3811,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4214,19 +3835,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4244,19 +3860,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4274,19 +3885,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4306,19 +3912,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4341,19 +3942,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4374,19 +3970,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButNotPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4407,19 +3998,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButN
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueAndPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4440,19 +4026,14 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueA
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4473,23 +4054,15 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfSecondaryCaughtUpButNotElectable) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "priority" << 0 << "hidden" << true)
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4519,15 +4092,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
{
BSONObjBuilder statusBuilder;
@@ -4576,15 +4146,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4649,10 +4216,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 5 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 30 << "host"
<< "hself:27017")
@@ -4660,8 +4224,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
<< "hprimary:27017")
<< BSON("_id" << 10 << "host"
<< "h1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4752,13 +4315,10 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
-1); // This node is no longer part of this replica set.
BSONObjBuilder statusBuilder;
@@ -4788,9 +4348,7 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))),
0);
@@ -4805,9 +4363,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4828,15 +4384,12 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4853,17 +4406,14 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4883,9 +4433,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNo
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4906,15 +4454,12 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary)
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondaries) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
@@ -4943,17 +4488,14 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondar
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 5))),
+ << "priority" << 5))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4975,9 +4517,7 @@ TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffEqualPriorityResolveByMemberId) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -5006,23 +4546,17 @@ TEST_F(TopoCoordTest, ArbiterNotIncludedInW3WriteInPSSAReplSet) {
// In a PSSA set, a w:3 write should only be acknowledged if both secondaries can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5051,31 +4585,21 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
// can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 4 << "host"
<< "host4:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5100,59 +4624,52 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
TEST_F(TopoCoordTest, CheckIfCommitQuorumCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
getTopoCoord().updateConfig(configA, -1, Date_t());
std::vector<MemberConfig> memberConfig;
@@ -5323,18 +4840,14 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenElectableAndReceiveHeartbeatFromPrimaryInCatchup) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5357,22 +4870,16 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenBothCatchupAndPriorityTakeoverPossible) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5395,43 +4902,26 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5500,19 +4990,15 @@ TEST_F(HeartbeatResponseTestV1,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5613,19 +5099,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5700,21 +5182,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5739,21 +5215,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -5775,21 +5245,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -5812,21 +5276,15 @@ TEST_F(HeartbeatResponseTestV1,
// in all multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5983,21 +5441,15 @@ TEST_F(HeartbeatResponseTestV1, ShouldNotChangeSyncSourceWhenFresherMemberDoesNo
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
+ << "buildIndexes" << false << "priority" << 0))
+ << "protocolVersion" << 1),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -6311,18 +5763,14 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
originalConfig
.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)))
.transitional_ignore();
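
Every hunk in this test file is whitespace-only: the updated clang-format packs consecutive << operands of the BSON() macro onto shared lines up to the column limit, where the old style gave each operand its own line. A minimal before/after sketch; the object built is identical either way:

    BSONObj before = BSON("_id" << "rs0"
                                << "version"
                                << 1);
    BSONObj after = BSON("_id" << "rs0"
                               << "version" << 1);
    ASSERT_EQUALS(0, before.woCompare(after));  // formatting never changes the BSONObj
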
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index ca0bc29844e..7d2dc73cf8c 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -59,31 +59,23 @@ class VoteRequesterTest : public mongo::unittest::Test {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -216,31 +208,23 @@ class VoteRequesterDryRunTest : public VoteRequesterTest {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -261,11 +245,7 @@ public:
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index cd50f2c0289..363eba6eb94 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -146,9 +146,7 @@ private:
invariant(!name.empty(),
str::stream()
<< "Bad spec passed into ReplIndexBuildState constructor, missing '"
- << IndexDescriptor::kIndexNameFieldName
- << "' field: "
- << spec);
+ << IndexDescriptor::kIndexNameFieldName << "' field: " << spec);
indexNames.push_back(name);
}
return indexNames;
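
The invariant message above is assembled with str::stream, an accumulator whose operator<< returns the stream itself and which converts implicitly to std::string, so joining the operands onto one line is purely cosmetic. A sketch reusing the names from the hunk (msg is an illustrative local):

    std::string msg = str::stream()
        << "Bad spec passed into ReplIndexBuildState constructor, missing '"
        << IndexDescriptor::kIndexNameFieldName << "' field: " << spec;
    invariant(!name.empty(), msg);  // the two-argument form prints msg on failure
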
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index f8e26472a8d..503f3f24d1f 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -148,9 +148,7 @@ Status ActiveMigrationsRegistry::ActiveMoveChunkState::constructErrorStatus() co
str::stream() << "Unable to start new migration because this shard is currently "
"donating chunk "
<< ChunkRange(args.getMinKey(), args.getMaxKey()).toString()
- << " for namespace "
- << args.getNss().ns()
- << " to "
+ << " for namespace " << args.getNss().ns() << " to "
<< args.getToShardId()};
}
@@ -158,10 +156,7 @@ Status ActiveMigrationsRegistry::ActiveReceiveChunkState::constructErrorStatus()
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to start new migration because this shard is currently "
"receiving chunk "
- << range.toString()
- << " for namespace "
- << nss.ns()
- << " from "
+ << range.toString() << " for namespace " << nss.ns() << " from "
<< fromShardId};
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index a02da4c899b..f71f7a63d80 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -90,9 +90,7 @@ Status ActiveMovePrimariesRegistry::ActiveMovePrimaryState::constructErrorStatus
str::stream()
<< "Unable to start new movePrimary operation because this shard is currently "
"moving its primary for namespace "
- << requestArgs.get_movePrimary().ns()
- << " to "
- << requestArgs.getTo()};
+ << requestArgs.get_movePrimary().ns() << " to " << requestArgs.getTo()};
}
ScopedMovePrimary::ScopedMovePrimary(ActiveMovePrimariesRegistry* registry,
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 8ddd051478e..38b19a6c94f 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -159,4 +159,4 @@ private:
// This is the future, which will be signaled at the end of a movePrimary command.
std::shared_ptr<Notification<Status>> _completionNotification;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/s/active_move_primaries_registry_test.cpp b/src/mongo/db/s/active_move_primaries_registry_test.cpp
index 52b7d7daf6d..65fd968f377 100644
--- a/src/mongo/db/s/active_move_primaries_registry_test.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry_test.cpp
@@ -27,9 +27,9 @@
* it in the license file.
*/
-#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/client.h"
+#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/request_types/move_primary_gen.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index eb6c42923a6..6a01fdd90ee 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -134,11 +134,9 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to shard collection "
<< request.get_shardsvrShardCollection().get().ns()
- << " with arguments: "
- << request.toBSON()
+ << " with arguments: " << request.toBSON()
<< " because this shard is currently running shard collection on this "
- << "collection with arguments: "
- << activeRequest.toBSON()};
+ << "collection with arguments: " << activeRequest.toBSON()};
}
ScopedShardCollection::ScopedShardCollection(std::string nss,
diff --git a/src/mongo/db/s/add_shard_util.cpp b/src/mongo/db/s/add_shard_util.cpp
index 466d1a3fe6d..0dae94c0102 100644
--- a/src/mongo/db/s/add_shard_util.cpp
+++ b/src/mongo/db/s/add_shard_util.cpp
@@ -77,5 +77,5 @@ BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd) {
return request.toBSON();
}
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/add_shard_util.h b/src/mongo/db/s/add_shard_util.h
index b7ab9fd0b36..020831833ba 100644
--- a/src/mongo/db/s/add_shard_util.h
+++ b/src/mongo/db/s/add_shard_util.h
@@ -60,5 +60,5 @@ AddShard createAddShardCmd(OperationContext* opCtx, const ShardId& shardName);
*/
BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd);
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
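
Unlike most hunks in this change, the two add_shard_util edits correct a real, if harmless, mistake: the closing-brace comments were swapped relative to the actual nesting, since add_shard_util is declared inside mongo. With namespace-comment fixing enabled, each comment now names the brace it closes:

    namespace mongo {
    namespace add_shard_util {

    BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd);

    }  // namespace add_shard_util
    }  // namespace mongo
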
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 3162ca40daf..62677b3eafa 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -441,12 +441,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
@@ -462,12 +460,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index a12893339d1..97a18c7ae08 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -126,8 +126,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << intersectingRange.toString()};
+ << " is overlapping with existing: " << intersectingRange.toString()};
}
// Check for containment
@@ -137,8 +136,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
invariant(SimpleBSONObjComparator::kInstance.evaluate(range.max < nextRange.max));
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << nextRange.toString()};
+ << " is overlapping with existing: " << nextRange.toString()};
}
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b131bbafde7..0a988cf1b13 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -517,7 +517,7 @@ void MigrationManager::_schedule(WithLock lock,
StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
executor->scheduleRemoteCommand(
remoteRequest,
- [ this, service = opCtx->getServiceContext(), itMigration ](
+ [this, service = opCtx->getServiceContext(), itMigration](
const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
@@ -614,8 +614,7 @@ Status MigrationManager::_processRemoteCommandResponse(
scopedMigrationRequest->keepDocumentOnDestruct();
return {ErrorCodes::BalancerInterrupted,
stream() << "Migration interrupted because the balancer is stopping."
- << " Command status: "
- << remoteCommandResponse.status.toString()};
+ << " Command status: " << remoteCommandResponse.status.toString()};
}
if (!remoteCommandResponse.isOK()) {
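
The scheduleRemoteCommand hunk above only reflows the lambda introducer: the old format padded the brackets of a capture list containing an init-capture ("[ this, service = ..., itMigration ](") and the new one keeps them tight; capture semantics are unchanged. A self-contained illustration of an init-capture, deliberately unrelated to the migration code:

    #include <iostream>

    int main() {
        int seed = 40;
        // "offset" is an init-capture: it exists only inside the closure.
        auto add = [seed, offset = 2](int x) { return seed + offset + x; };
        // the pre-update format would have written: [ seed, offset = 2 ](int x)
        std::cout << add(0) << '\n';  // prints 42
        return 0;
    }
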
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index ec2bdc8b12d..44ba02a9e2b 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -137,17 +137,17 @@ protected:
// Random static initialization order can result in X constructor running before Y constructor
// if X and Y are defined in different source files. Defining variables here to enforce order.
const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId0.toString())
+ << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId1.toString())
+ << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId2.toString())
+ << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId3.toString())
+ << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index a0ef6dadf16..40441637ba4 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -118,8 +118,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithMigrationQueryResult.isOK()) {
return statusWithMigrationQueryResult.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to query config.migrations.");
}
if (statusWithMigrationQueryResult.getValue().docs.empty()) {
@@ -134,11 +133,9 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithActiveMigration.isOK()) {
return statusWithActiveMigration.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to parse active migration document '"
- << redact(activeMigrationBSON.toString())
- << "'.");
+ << redact(activeMigrationBSON.toString()) << "'.");
}
MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
@@ -172,8 +169,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
str::stream() << "Failed to insert the config.migrations document after max "
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
- << "' in collection '"
- << migrateInfo.nss.ns()
+ << "' in collection '" << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 7bc56741ae8..12325cfe830 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -165,8 +165,8 @@ public:
BSONObjIterator i(currKey);
for (int k = 0; k < keyPatternLength; k++) {
if (!i.more()) {
- errmsg = str::stream() << "index key " << currKey << " too short for pattern "
- << keyPattern;
+ errmsg = str::stream()
+ << "index key " << currKey << " too short for pattern " << keyPattern;
return false;
}
BSONElement currKeyElt = i.next();
@@ -192,8 +192,9 @@ public:
const string msg = str::stream()
<< "There are documents which have missing or incomplete shard key fields ("
- << redact(currKey) << "). Please ensure that all documents in the collection "
- "include all fields from the shard key.";
+ << redact(currKey)
+ << "). Please ensure that all documents in the collection "
+ "include all fields from the shard key.";
log() << "checkShardingIndex for '" << nss.toString() << "' failed: " << msg;
errmsg = msg;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index cfe972510a7..049ab0ae261 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -176,8 +176,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
uassert(40618,
str::stream() << "failed to initialize cursor during auto split due to "
- << "connection problem with "
- << client.getServerAddress(),
+ << "connection problem with " << client.getServerAddress(),
cursor.get() != nullptr);
if (cursor->more()) {
@@ -273,8 +272,8 @@ void ChunkSplitter::trySplitting(std::shared_ptr<ChunkSplitStateDriver> chunkSpl
return;
}
_threadPool.schedule(
- [ this, csd = std::move(chunkSplitStateDriver), nss, min, max, dataWritten ](
- auto status) noexcept {
+ [ this, csd = std::move(chunkSplitStateDriver), nss, min, max,
+ dataWritten ](auto status) noexcept {
invariant(status);
_runAutosplit(csd, nss, min, max, dataWritten);
@@ -384,7 +383,8 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
log() << "autosplitted " << nss << " chunk: " << redact(chunk.toString()) << " into "
<< (splitPoints.size() + 1) << " parts (maxChunkSizeBytes " << maxChunkSizeBytes
<< ")"
- << (topChunkMinKey.isEmpty() ? "" : " (top chunk migration suggested" +
+ << (topChunkMinKey.isEmpty() ? ""
+ : " (top chunk migration suggested" +
(std::string)(shouldBalance ? ")" : ", but no migrations allowed)"));
// Because the ShardServerOpObserver uses the metadata from the CSS for tracking incoming
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 28eab0d23bb..303c8a7a602 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -89,9 +89,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
BSONObj keyPattern = metadata->getKeyPattern();
if (!startingFromKey.isEmpty()) {
if (!metadata->isValidKey(startingFromKey)) {
- *errMsg = str::stream() << "could not cleanup orphaned data, start key "
- << startingFromKey << " does not match shard key pattern "
- << keyPattern;
+ *errMsg = str::stream()
+ << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
log() << *errMsg;
return CleanupResult_Error;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 112499944af..9d9f48bab24 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -131,8 +131,7 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
return {ErrorCodes::StaleShardVersion,
str::stream() << "Unable to find chunk with the exact bounds "
<< ChunkRange(chunk.getMin(), chunk.getMax()).toString()
- << " at collection version "
- << getCollVersion().toString()};
+ << " at collection version " << getCollVersion().toString()};
}
return Status::OK();
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index d125e651adc..34ff588020f 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -132,8 +132,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(100, 0)));
+ << "atClusterTime" << Timestamp(100, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -163,8 +162,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(50, 0)));
+ << "atClusterTime" << Timestamp(50, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -202,8 +200,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(10, 0)));
+ << "atClusterTime" << Timestamp(10, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 33f81707841..afcaf6be496 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -111,8 +111,7 @@ TEST_F(NoChunkFixture, IsValidKey) {
ASSERT(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("a"
<< "abcde"
- << "b"
- << 1)));
+ << "b" << 1)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("c"
<< "abcde")));
}
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 1b63c1ce74c..d5affc26cc0 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -178,14 +178,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
NamespaceString::kServerConfigurationNamespace.ns(),
BSON("_id"
<< "startRangeDeletion"
- << "ns"
- << nss.ns()
- << "epoch"
- << epoch
- << "min"
- << range->getMin()
- << "max"
- << range->getMax()));
+ << "ns" << nss.ns() << "epoch" << epoch << "min"
+ << range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
@@ -354,8 +348,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
auto catalog = collection->getIndexCatalog();
const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (!idx) {
- std::string msg = str::stream() << "Unable to find shard key index for "
- << keyPattern.toString() << " in " << nss.ns();
+ std::string msg = str::stream()
+ << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
@@ -375,8 +369,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
const IndexDescriptor* descriptor =
collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!descriptor) {
- std::string msg = str::stream() << "shard key index with name " << indexName << " on '"
- << nss.ns() << "' was dropped";
+ std::string msg = str::stream()
+ << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 6fae0ee5d18..0ebc79ac8a6 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -59,14 +59,14 @@ class CollectionRangeDeleter {
public:
/**
- * This is an object n that asynchronously changes state when a scheduled range deletion
- * completes or fails. Call n.ready() to discover if the event has already occurred. Call
- * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
- * interrupted, waitStatus throws.
- *
- * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
- * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
- */
+ * This is an object n that asynchronously changes state when a scheduled range deletion
+ * completes or fails. Call n.ready() to discover if the event has already occurred. Call
+ * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
+ * interrupted, waitStatus throws.
+ *
+ * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
+ * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
+ */
class DeleteNotification {
public:
DeleteNotification();
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 684ae740a00..1fc98f41876 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -162,8 +162,7 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
Status result = stillScheduled->waitStatus(opCtx);
if (!result.isOK()) {
return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns()
- << " range "
- << orphanRange.toString());
+ << " range " << orphanRange.toString());
}
}
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index d085f9440f3..1ee6cfbeed8 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -80,12 +80,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 3
- << "_id"
+ << "key" << 3 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Check that an order for deletion from an unsharded collection extracts just the "_id" field
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -103,12 +100,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 100
- << "_id"
+ << "key" << 100 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Verify the shard key is extracted, in correct order, followed by the "_id" field.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -130,15 +124,13 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
<< "abc"
<< "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
BSON("key" << 100 << "_id"
<< "hello"
- << "key2"
- << true));
+ << "key2" << true));
ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc));
}
@@ -151,8 +143,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
auto doc = BSON("key2" << true << "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place, not hashed.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index b1c3717f3ff..e9ca1356b62 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -50,8 +50,8 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
namespace {
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index f0272e1a92c..5e37c8cf0eb 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -160,10 +160,9 @@ public:
if (!toShardStatus.isOK()) {
log() << "Could not move database '" << dbname << "' to shard '" << to
<< causedBy(toShardStatus.getStatus());
- uassertStatusOKWithContext(
- toShardStatus.getStatus(),
- str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "'");
+ uassertStatusOKWithContext(toShardStatus.getStatus(),
+ str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2f39f852bc8..5186128ef8c 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -112,8 +112,8 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
- std::string msg(str::stream() << "Could not drop shard '" << target
- << "' because it does not exist");
+ std::string msg(str::stream()
+ << "Could not drop shard '" << target << "' because it does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 3cdb4d2e5d3..b63165e4517 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -158,8 +158,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
uassert(ErrorCodes::BadValue,
str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: "
- << collation,
+ << "but found: " << collation,
!collator);
simpleCollationSpecified = true;
}
@@ -173,8 +172,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
int numChunks = request->getNumInitialChunks();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards
- << ", 8192 * number of shards; or "
+ << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
<< maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
@@ -303,9 +301,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -323,8 +319,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -438,9 +433,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
auto chunkManager = routingInfo.cm();
// Move and commit each "big chunk" to a different shard.
- auto nextShardId = [&, indx = 0 ]() mutable {
- return shardIds[indx++ % shardIds.size()];
- };
+ auto nextShardId = [&, indx = 0]() mutable { return shardIds[indx++ % shardIds.size()]; };
for (auto chunk : chunkManager->chunks()) {
const auto shardId = nextShardId();
@@ -553,10 +546,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected the primary shard host " << primaryShard->getConnString()
- << " for database "
- << nss.db()
- << " to return an entry for "
- << nss.ns()
+ << " for database " << nss.db() << " to return an entry for " << nss.ns()
<< " in its listCollections response, but it did not",
!res.isEmpty());
@@ -568,15 +558,12 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return 'info' field as part of "
"listCollections for "
- << nss.ns()
- << ", but got "
- << res,
+ << nss.ns() << ", but got " << res,
!collectionInfo.isEmpty());
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -806,8 +793,7 @@ public:
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 52d363c6e8c..ca164a0264f 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -222,7 +222,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& keyPattern = shardKeyPattern.getKeyPattern();
- auto nextShardIdForHole = [&, indx = 0 ]() mutable {
+ auto nextShardIdForHole = [&, indx = 0]() mutable {
return shardIdsForGaps[indx++ % shardIdsForGaps.size()];
};
@@ -249,10 +249,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& shardIdsForChunk = it->second;
uassert(50973,
str::stream()
- << "Cannot shard collection "
- << nss.ns()
- << " due to zone "
- << tag.getTag()
+ << "Cannot shard collection " << nss.ns() << " due to zone " << tag.getTag()
<< " which is not assigned to a shard. Please assign this zone to a shard.",
!shardIdsForChunk.empty());
@@ -395,7 +392,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
shardSelectedSplitPoints,
shardIds,
1 // numContiguousChunksPerShard
- );
+ );
}
boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShardedWithSameOptions(
@@ -424,8 +421,7 @@ boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShar
// match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
- << " with options "
- << existingOptions.toString(),
+ << " with options " << existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
return existingOptions;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index fc610ed35a3..424db73a9d0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -198,8 +198,7 @@ Status ShardingCatalogManager::_initConfigVersion(OperationContext* opCtx) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION
- << "; currently at v"
+ << CURRENT_CONFIG_VERSION << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index ce9460cf3e6..61d1439379e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -129,8 +129,9 @@ protected:
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, nss.db());
ASSERT_BSONOBJ_EQ(request.cmdObj,
- BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("drop" << nss.coll() << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 1);
@@ -146,8 +147,7 @@ protected:
ASSERT_BSONOBJ_EQ(request.cmdObj,
BSON("setFeatureCompatibilityVersion"
<< "4.2"
- << "writeConcern"
- << writeConcern));
+ << "writeConcern" << writeConcern));
return response;
});
@@ -315,18 +315,16 @@ protected:
* describing the addShard request for 'addedShard'.
*/
void assertChangeWasLogged(const ShardType& addedShard) {
- auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name" << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
@@ -347,35 +345,24 @@ protected:
TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
std::string shardName = "shardName";
- BSONObj expectedBSON = BSON("update"
- << "system.version"
- << "bypassDocumentValidation"
- << false
- << "ordered"
- << true
- << "updates"
- << BSON_ARRAY(BSON(
- "q"
- << BSON("_id"
- << "shardIdentity")
- << "u"
- << BSON("shardName" << shardName << "clusterId" << _clusterId
- << "configsvrConnectionString"
- << replicationCoordinator()
- ->getConfig()
- .getConnectionString()
- .toString())
- << "multi"
- << false
- << "upsert"
- << true))
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 60000)
- << "allowImplicitCollectionCreation"
- << true);
+ BSONObj expectedBSON = BSON(
+ "update"
+ << "system.version"
+ << "bypassDocumentValidation" << false << "ordered" << true << "updates"
+ << BSON_ARRAY(BSON(
+ "q" << BSON("_id"
+ << "shardIdentity")
+ << "u"
+ << BSON(
+ "shardName"
+ << shardName << "clusterId" << _clusterId << "configsvrConnectionString"
+ << replicationCoordinator()->getConfig().getConnectionString().toString())
+ << "multi" << false << "upsert" << true))
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout" << 60000)
+ << "allowImplicitCollectionCreation" << true);
auto addShardCmd = add_shard_util::createAddShardCmd(operationContext(), shardName);
auto actualBSON = add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
ASSERT_BSONOBJ_EQ(expectedBSON, actualBSON);
@@ -427,8 +414,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -508,8 +494,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -648,8 +633,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -706,8 +690,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -735,12 +718,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
"as a shard since it is a config server");
});
- BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
- << "config"
- << "configsvr"
- << true
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ BSONObj commandResponse =
+ BSON("ok" << 1 << "ismaster" << true << "setName"
+ << "config"
+ << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -772,9 +753,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -808,9 +787,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -855,9 +832,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -900,9 +875,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -966,9 +939,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -1049,8 +1020,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 2cf8b41864a..86aa76b89dc 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -129,8 +129,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
<< ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -146,8 +145,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
const ShardId& shard) {
BSONObj chunkQuery =
BSON(ChunkType::ns() << nss.ns() << ChunkType::min() << min << ChunkType::max() << max
- << ChunkType::shard()
- << shard);
+ << ChunkType::shard() << shard);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -166,8 +164,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
if (findResponseWith.getValue().docs.empty()) {
return {ErrorCodes::Error(40165),
str::stream()
- << "Could not find the chunk ("
- << chunkQuery.toString()
+ << "Could not find the chunk (" << chunkQuery.toString()
<< ") on the shard. Cannot execute the migration commit with invalid chunks."};
}
@@ -345,18 +342,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {
ErrorCodes::InvalidOptions,
str::stream() << "Split keys must be specified in strictly increasing order. Key "
- << endKey
- << " was specified after "
- << startKey
- << "."};
+ << endKey << " was specified after " << startKey << "."};
}
// Verify that splitPoints are not repeated
if (endKey.woCompare(startKey) == 0) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Split on lower bound of chunk "
- << ChunkRange(startKey, endKey).toString()
- << "is not allowed"};
+ << ChunkRange(startKey, endKey).toString() << " is not allowed"};
}
// verify that splits don't create too-big shard keys
@@ -419,10 +412,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
- << ChunkType::max()
- << range.getMax())
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << ChunkType::max() << range.getMax())
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
bb.append(ChunkType::epoch(), requestEpoch);
@@ -544,10 +535,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ErrorCodes::InvalidOptions,
str::stream()
<< "Chunk boundaries must be specified in strictly increasing order. Boundary "
- << chunkBoundaries[i]
- << " was specified after "
- << itChunk.getMin()
- << "."};
+ << chunkBoundaries[i] << " was specified after " << itChunk.getMin() << "."};
}
itChunk.setMax(chunkBoundaries[i]);
@@ -660,11 +648,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
+ << "', but the shard's is " << collectionEpoch.toString()
<< "'. Aborting migration commit for chunk ("
- << migratedChunk.getRange().toString()
- << ")."};
+ << migratedChunk.getRange().toString() << ")."};
}
// Check that migratedChunk is where it should be, on fromShard.
@@ -827,9 +813,7 @@ StatusWith<ChunkVersion> ShardingCatalogManager::_findCollectionVersion(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
- << "'."};
+ << "', but the shard's is " << collectionEpoch.toString() << "'."};
}
return currentCollectionVersion;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 66825b32c47..a442232d4c3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -75,9 +75,9 @@
namespace mongo {
using CollectionUUID = UUID;
+using std::set;
using std::string;
using std::vector;
-using std::set;
namespace {
@@ -114,8 +114,8 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
// TODO: SERVER-33048 check idIndex field
uassert(ErrorCodes::NamespaceExists,
- str::stream() << "ns: " << ns.ns() << " already exists with different options: "
- << actualOptions.toBSON(),
+ str::stream() << "ns: " << ns.ns()
+ << " already exists with different options: " << actualOptions.toBSON(),
options.matchesStorageOptions(
actualOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())));
@@ -171,8 +171,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -433,7 +432,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
optimizationType,
treatAsEmpty,
1 // numContiguousChunksPerShard
- );
+ );
} else {
initialChunks = InitialSplitPolicy::createFirstChunksUnoptimized(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index f66e29f6f74..8a2a0b0490e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -91,10 +91,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: "
- << actualDbName
- << " want to add: "
- << dbName,
+ << " have: " << actualDbName << " want to add: " << dbName,
actualDbName == dbName);
// We did a local read of the database entry above and found that the database already
@@ -251,8 +248,7 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
// are holding the dist lock during the movePrimary operation.
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to update primary shard for database '" << dbname
- << " with version "
- << currentDatabaseVersion.getLastMod(),
+ << " with version " << currentDatabaseVersion.getLastMod(),
updateStatus.getValue());
// Ensure the next attempt to retrieve the database or any of its collections will do a full
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 7452b250f14..52681ef3bdd 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -141,15 +141,13 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary"
- << 12
- << "partitioned"
- << false),
- ShardingCatalogClient::kMajorityWriteConcern));
+ ASSERT_OK(
+ catalogClient()->insertConfigDocument(operationContext(),
+ DatabaseType::ConfigNS,
+ BSON("_id"
+ << "db6"
+ << "primary" << 12 << "partitioned" << false),
+ ShardingCatalogClient::kMajorityWriteConcern));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db6"),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 8e6e2e29423..066405d32b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -181,19 +181,17 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
Status commandStatus = getStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
- commandStatus = {ErrorCodes::OperationFailed,
- str::stream() << "failed to run command " << cmdObj
- << " when attempting to add shard "
- << targeter->connectionString().toString()
- << causedBy(commandStatus)};
+ commandStatus = {
+ ErrorCodes::OperationFailed,
+ str::stream() << "failed to run command " << cmdObj << " when attempting to add shard "
+ << targeter->connectionString().toString() << causedBy(commandStatus)};
}
Status writeConcernStatus = getWriteConcernStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(writeConcernStatus.code())) {
writeConcernStatus = {ErrorCodes::OperationFailed,
str::stream() << "failed to satisfy writeConcern for command "
- << cmdObj
- << " when attempting to add shard "
+ << cmdObj << " when attempting to add shard "
<< targeter->connectionString().toString()
<< causedBy(writeConcernStatus)};
}
@@ -257,8 +255,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
} else {
return {ErrorCodes::IllegalOperation,
str::stream() << "A shard already exists containing the replica set '"
- << existingShardConnStr.getSetName()
- << "'"};
+ << existingShardConnStr.getSetName() << "'"};
}
}
@@ -277,10 +274,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
return {ErrorCodes::IllegalOperation,
str::stream() << "'" << addingHost.toString() << "' "
<< "is already a member of the existing shard '"
- << existingShard.getHost()
- << "' ("
- << existingShard.getName()
- << ")."};
+ << existingShard.getHost() << "' ("
+ << existingShard.getName() << ")."};
}
}
}
@@ -340,8 +335,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() >
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
@@ -362,8 +356,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -387,8 +380,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? "
- << resIsMaster};
+ << "is the replica set still initializing? " << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -396,8 +388,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name "
- << foundSetName};
+ << ") does not match the actual set name " << foundSetName};
}
// Is it a config server?
@@ -437,11 +428,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host
- << " does not belong to replica set "
- << foundSetName
- << "; found "
- << resIsMaster.toString()};
+ << host << " does not belong to replica set " << foundSetName
+ << "; found " << resIsMaster.toString()};
}
}
}
@@ -611,13 +599,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'"
- << shardConnectionString.toString()
- << "'"
- << " because a local database '"
- << dbName
- << "' exists in another "
- << dbDoc.getPrimary());
+ << "'" << shardConnectionString.toString() << "'"
+ << " because a local database '" << dbName
+ << "' exists in another " << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 34b03b338d3..f0c13ec3fef 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -154,17 +154,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
if (!range.getMin().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),
diff --git a/src/mongo/db/s/config_server_op_observer_test.cpp b/src/mongo/db/s/config_server_op_observer_test.cpp
index fc5ff24708d..eca0a3a19b5 100644
--- a/src/mongo/db/s/config_server_op_observer_test.cpp
+++ b/src/mongo/db/s/config_server_op_observer_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/config_server_test_fixture.h"
#include "mongo/unittest/death_test.h"
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index 13429421b43..77728821151 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -119,8 +119,7 @@ public:
uasserted(ErrorCodes::NamespaceNotFound,
str::stream()
<< "Can't issue _flushDatabaseCacheUpdates on the database "
- << _dbName()
- << " because it does not exist on this shard.");
+ << _dbName() << " because it does not exist on this shard.");
}
// If the primary is in the critical section, secondaries must wait for the commit
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0a808e8daac..75ea7635773 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -79,16 +79,13 @@ void mergeChunks(OperationContext* opCtx,
const BSONObj& minKey,
const BSONObj& maxKey,
const OID& epoch) {
- const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
- << minKey << " to " << maxKey;
+ const std::string whyMessage = str::stream()
+ << "merging chunks in " << nss.ns() << " from " << minKey << " to " << maxKey;
auto scopedDistLock = uassertStatusOKWithContext(
Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in ["
- << redact(minKey)
- << ", "
- << redact(maxKey)
+ << " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
auto const shardingState = ShardingState::get(opCtx);
@@ -109,20 +106,14 @@ void mergeChunks(OperationContext* opCtx,
const auto shardVersion = metadata->getShardVersion();
uassert(ErrorCodes::StaleEpoch,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: "
- << epoch.toString()
- << ", current epoch: "
- << shardVersion.epoch()
- << ")",
+ << " has changed since merge was sent (sent epoch: " << epoch.toString()
+ << ", current epoch: " << shardVersion.epoch() << ")",
shardVersion.epoch() == epoch);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, the range "
- << redact(ChunkRange(minKey, maxKey).toString())
- << " is not valid"
- << " for collection "
- << nss.ns()
- << " with key pattern "
+ << redact(ChunkRange(minKey, maxKey).toString()) << " is not valid"
+ << " for collection " << nss.ns() << " with key pattern "
<< metadata->getKeyPattern().toString(),
metadata->isValidKey(minKey) && metadata->isValidKey(maxKey));
@@ -145,11 +136,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " and ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " and ending at "
+ << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
!chunksToMerge.empty());
@@ -164,9 +152,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " does not belong to shard "
<< shardingState->shardId(),
minKeyInRange);
@@ -177,9 +163,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range ending at " << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
maxKeyInRange);
@@ -205,11 +189,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(
ErrorCodes::IllegalOperation,
str::stream()
- << "could not merge chunks, collection "
- << nss.ns()
- << " has a hole in the range "
- << ChunkRange(minKey, maxKey).toString()
- << " at "
+ << "could not merge chunks, collection " << nss.ns() << " has a hole in the range "
+ << ChunkRange(minKey, maxKey).toString() << " at "
<< ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin()).toString(),
chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) == 0);
}
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index b111875db39..4926fe86508 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -143,7 +143,7 @@ void scheduleCleanup(executor::TaskExecutor* executor,
Date_t when) {
LOG(1) << "Scheduling cleanup on " << nss.ns() << " at " << when;
auto swCallbackHandle = executor->scheduleWorkAt(
- when, [ executor, nss = std::move(nss), epoch = std::move(epoch) ](auto& args) {
+ when, [executor, nss = std::move(nss), epoch = std::move(epoch)](auto& args) {
auto& status = args.status;
if (ErrorCodes::isCancelationError(status.code())) {
return;
@@ -229,11 +229,11 @@ MetadataManager::~MetadataManager() {
}
void MetadataManager::_clearAllCleanups(WithLock lock) {
- _clearAllCleanups(
- lock,
- {ErrorCodes::InterruptedDueToReplStateChange,
- str::stream() << "Range deletions in " << _nss.ns()
- << " abandoned because collection was dropped or became unsharded"});
+ _clearAllCleanups(lock,
+ {ErrorCodes::InterruptedDueToReplStateChange,
+ str::stream()
+ << "Range deletions in " << _nss.ns()
+ << " abandoned because collection was dropped or became unsharded"});
}
void MetadataManager::_clearAllCleanups(WithLock, Status status) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index cc632bcbbc2..a0ca0696f16 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -328,8 +328,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return {ErrorCodes::OperationIncomplete,
str::stream() << "Unable to enter critical section because the recipient "
"shard thinks all data is cloned while there are still "
- << cloneLocsRemaining
- << " documents remaining"};
+ << cloneLocsRemaining << " documents remaining"};
}
return Status::OK();
@@ -746,8 +745,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for "
- << _args.getNss().ns()};
+ << " in storeCurrentLocs for " << _args.getNss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
@@ -819,19 +817,10 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull
- << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes()
- << ", average document size is "
- << avgRecSize
- << ". Found "
- << recCount
- << " documents in chunk "
- << " ns: "
- << _args.getNss().ns()
- << " "
- << _args.getMinKey()
- << " -> "
+ << maxRecsWhenFull << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes() << ", average document size is "
+ << avgRecSize << ". Found " << recCount << " documents in chunk "
+ << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index aa21bce528a..1e5fe3ec7e1 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -86,8 +86,8 @@ public:
invariant(_chunkCloner);
} else {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "No active migrations were found for collection "
- << nss->ns());
+ str::stream()
+ << "No active migrations were found for collection " << nss->ns());
}
}
@@ -317,9 +317,7 @@ public:
auto rollbackId = repl::ReplicationProcess::get(opCtx)->getRollbackID();
uassert(50881,
str::stream() << "rollback detected, rollbackId was "
- << rollbackIdAtMigrationInit
- << " but is now "
- << rollbackId,
+ << rollbackIdAtMigrationInit << " but is now " << rollbackId,
rollbackId == rollbackIdAtMigrationInit);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 68bb3aba86e..a88be055ad6 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -436,8 +436,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "received abort request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -462,8 +461,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
str::stream() << "Migration startCommit attempted when not in STEADY state."
- << " Sender's session is "
- << sessionId.toString()
+ << " Sender's session is " << sessionId.toString()
<< (_sessionId ? (". Current session is " + _sessionId->toString())
: ". No active session on this shard.")};
}
@@ -477,8 +475,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "startCommit received commit request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -550,9 +547,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
auto infos = infosRes.docs;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "expected listCollections against the primary shard for "
- << nss.toString()
- << " to return 1 entry, but got "
- << infos.size()
+ << nss.toString() << " to return 1 entry, but got " << infos.size()
<< " entries",
infos.size() == 1);
@@ -574,8 +569,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream() << "The donor shard did not return a UUID for collection " << nss.ns()
- << " as part of its listCollections response: "
- << entry
+ << " as part of its listCollections response: " << entry
<< ", but this node expects to see a UUID.",
!info["uuid"].eoo());
@@ -602,8 +596,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream()
- << "Cannot create collection "
- << nss.ns()
+ << "Cannot create collection " << nss.ns()
<< " because we already have an identically named collection with UUID "
<< (collection->uuid() ? collection->uuid()->toString() : "(none)")
<< ", which differs from the donor's UUID "
@@ -622,10 +615,10 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
if (!indexSpecs.empty()) {
// Only allow indexes to be copied if the collection does not have any documents.
uassert(ErrorCodes::CannotCreateCollection,
- str::stream() << "aborting, shard is missing " << indexSpecs.size()
- << " indexes and "
- << "collection is not empty. Non-trivial "
- << "index creation should be scheduled manually",
+ str::stream()
+ << "aborting, shard is missing " << indexSpecs.size() << " indexes and "
+ << "collection is not empty. Non-trivial "
+ << "index creation should be scheduled manually",
collection->numRecords(opCtx) == 0);
}
return indexSpecs;
@@ -1153,10 +1146,9 @@ CollectionShardingRuntime::CleanupNotification MigrationDestinationManager::_not
if (!optMetadata || !(*optMetadata)->isSharded() ||
(*optMetadata)->getCollVersion().epoch() != _epoch) {
return Status{ErrorCodes::StaleShardVersion,
- str::stream() << "Not marking chunk " << redact(range.toString())
- << " as pending because the epoch of "
- << _nss.ns()
- << " changed"};
+ str::stream()
+ << "Not marking chunk " << redact(range.toString())
+ << " as pending because the epoch of " << _nss.ns() << " changed"};
}
// Start clearing any leftovers that would be in the new chunk
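
The uassert(code, message, condition) calls being rewrapped throughout this file throw when the condition fails, with the str::stream() chain supplying the message. A rough standalone sketch of that contract, using a hypothetical uassertSketch function rather than MongoDB's real macro (which, unlike this sketch, only evaluates the message expression on failure):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for uassert: throws when 'cond' is false, carrying
    // a numeric code and the pre-built message. The real macro is lazier.
    void uassertSketch(int code, const std::string& msg, bool cond) {
        if (!cond) {
            throw std::runtime_error("(" + std::to_string(code) + ") " + msg);
        }
    }

    int main() {
        int rollbackIdAtMigrationInit = 1;
        int rollbackId = 2;
        try {
            uassertSketch(50881,
                          "rollback detected, rollbackId was " +
                              std::to_string(rollbackIdAtMigrationInit) +
                              " but is now " + std::to_string(rollbackId),
                          rollbackId == rollbackIdAtMigrationInit);
        } catch (const std::exception& e) {
            std::cout << e.what() << '\n';  // (50881) rollback detected, ...
        }
        return 0;
    }
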
diff --git a/src/mongo/db/s/migration_session_id.cpp b/src/mongo/db/s/migration_session_id.cpp
index d2cfeab3254..7049a0870cf 100644
--- a/src/mongo/db/s/migration_session_id.cpp
+++ b/src/mongo/db/s/migration_session_id.cpp
@@ -53,8 +53,8 @@ MigrationSessionId MigrationSessionId::generate(StringData donor, StringData rec
invariant(!donor.empty());
invariant(!recipient.empty());
- return MigrationSessionId(str::stream() << donor << "_" << recipient << "_"
- << OID::gen().toString());
+ return MigrationSessionId(str::stream()
+ << donor << "_" << recipient << "_" << OID::gen().toString());
}
StatusWith<MigrationSessionId> MigrationSessionId::extractFromBSON(const BSONObj& obj) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index ab4d76a8952..4533ec35968 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -191,10 +191,8 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
uassert(ErrorCodes::StaleEpoch,
str::stream() << "cannot move chunk " << _args.toString()
<< " because collection may have been dropped. "
- << "current epoch: "
- << collectionVersion.epoch()
- << ", cmd epoch: "
- << _args.getVersionEpoch(),
+ << "current epoch: " << collectionVersion.epoch()
+ << ", cmd epoch: " << _args.getVersionEpoch(),
_args.getVersionEpoch() == collectionVersion.epoch());
ChunkType chunkToMove;
@@ -229,9 +227,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
"moveChunk.start",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if (logStatus != Status::OK()) {
return logStatus;
@@ -455,9 +451,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.validating",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if ((ErrorCodes::isInterruption(status.code()) ||
@@ -490,12 +484,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
}
fassert(40137,
- status.withContext(
- str::stream() << "Failed to commit migration for chunk " << _args.toString()
- << " due to "
- << redact(migrationCommitStatus)
- << ". Updating the optime with a write before refreshing the "
- << "metadata also failed"));
+ status.withContext(str::stream()
+ << "Failed to commit migration for chunk " << _args.toString()
+ << " due to " << redact(migrationCommitStatus)
+ << ". Updating the optime with a write before refreshing the "
+ << "metadata also failed"));
}
// Do a best effort attempt to incrementally refresh the metadata before leaving the critical
@@ -527,8 +520,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
return migrationCommitStatus.withContext(
str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
" migration commit due to '"
- << refreshStatus.toString()
- << "' after commit failed");
+ << refreshStatus.toString() << "' after commit failed");
}
const auto refreshedMetadata = _getCurrentMetadataAndCheckEpoch(opCtx);
@@ -572,10 +564,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.commit",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()
- << "counts"
+ << _args.getFromShardId() << "to" << _args.getToShardId() << "counts"
<< _recipientCloneCounts),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -635,9 +624,7 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
"moveChunk.error",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
try {
@@ -664,8 +651,7 @@ ScopedCollectionMetadata MigrationSourceManager::_getCurrentMetadataAndCheckEpoc
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "The collection was dropped or recreated since the migration began. "
- << "Expected collection epoch: "
- << _collectionEpoch.toString()
+ << "Expected collection epoch: " << _collectionEpoch.toString()
<< ", but found: "
<< (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
: "unsharded collection."),
@@ -687,9 +673,7 @@ void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
// The message expected by change streams
const auto o2Message = BSON("type"
<< "migrateChunkToNewShard"
- << "from"
- << _args.getFromShardId()
- << "to"
+ << "from" << _args.getFromShardId() << "to"
<< _args.getToShardId());
auto const serviceContext = opCtx->getClient()->getServiceContext();
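
The BSON("min" << _args.getMinKey() << "max" << ...) expressions rewrapped in this file alternate keys and values within a single << chain. A loose, self-contained approximation of that alternation, using a hypothetical BsonSketch that renders JSON-ish text rather than a real BSONObj:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Loose stand-in for the BSON(...) macro seen above: keys and values
    // alternate in one << chain. This only renders text; the real macro
    // builds a binary BSONObj.
    class BsonSketch {
    public:
        template <typename T>
        BsonSketch& operator<<(const T& v) {
            if (_expectKey) {
                _os << (_any ? ", " : "") << v << ": ";
            } else {
                _os << v;
            }
            _expectKey = !_expectKey;
            _any = true;
            return *this;
        }
        std::string str() const { return "{ " + _os.str() + " }"; }

    private:
        std::ostringstream _os;
        bool _expectKey = true;
        bool _any = false;
    };

    int main() {
        BsonSketch b;
        b << "min" << 0 << "max" << 100 << "from" << "shard0" << "to" << "shard1";
        std::cout << b.str() << '\n';
        // { min: 0, max: 100, from: shard0, to: shard1 }
        return 0;
    }
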
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ac20cb2f350..a66109e73ba 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -45,7 +45,7 @@ const char kDestinationShard[] = "destination";
const char kIsDonorShard[] = "isDonorShard";
const char kChunk[] = "chunk";
const char kCollection[] = "collection";
-}
+} // namespace
BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const ShardId& fromShard,
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index dc2469d8602..67b59761477 100644
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -56,6 +56,6 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max);
-} // namespace shardutil
+} // namespace migrationutil
} // namespace mongo
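
The two migration_util hunks correct namespace-closing comments, which the newer clang-format checks against the namespace actually being closed (here, "// namespace shardutil" had survived a rename to migrationutil). A tiny sketch of the convention, with illustrative names only:

    namespace mongo {
    namespace migrationutil {
    inline int answer() { return 42; }  // placeholder declaration
    }  // namespace migrationutil (closing comment must name the namespace)
    }  // namespace mongo

    namespace {
    constexpr int kLocal = 1;  // anonymous namespaces close with a bare comment
    }  // namespace

    int main() {
        return mongo::migrationutil::answer() == 42 && kLocal ? 0 : 1;
    }
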
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index dd62c984292..8fafb8c0253 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -149,8 +149,8 @@ public:
} catch (const std::exception& e) {
scopedMigration.signalComplete(
{ErrorCodes::InternalError,
- str::stream() << "Severe error occurred while running moveChunk command: "
- << e.what()});
+ str::stream()
+ << "Severe error occurred while running moveChunk command: " << e.what()});
throw;
}
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 63a1ebb7bd6..be0a16193eb 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -282,8 +282,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
fassert(50762,
validateStatus.withContext(
str::stream() << "Failed to commit movePrimary for database " << getNss().ns()
- << " due to "
- << redact(commitStatus)
+ << " due to " << redact(commitStatus)
<< ". Updating the optime with a write before clearing the "
<< "version also failed"));
diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
index de61f5fbfd2..baea9099032 100644
--- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
+++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
@@ -37,7 +37,7 @@ namespace mongo {
* This class has a destructor that handles rerouting exceptions that might have occurred
* during an operation. For this reason, there should be only one instance of this object
* in the chain of a single OperationContext.
-*/
+ */
class OperationContext;
class ScopedOperationCompletionShardingActions : public PolymorphicScoped {
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index fd6a3086dd7..0ff9dcaa737 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -92,10 +92,8 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
if (!lastResult.isPrePostImage) {
uassert(40628,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString()
- << " to not have "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
- << repl::OplogEntryBase::kPostImageOpTimeFieldName,
+ << " to not have " << repl::OplogEntryBase::kPreImageOpTimeFieldName
+ << " or " << repl::OplogEntryBase::kPostImageOpTimeFieldName,
!entry.getPreImageOpTime() && !entry.getPostImageOpTime());
return oplogLink;
@@ -109,15 +107,11 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
uassert(40629,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString() << ": "
- << redact(entry.toBSON())
- << " to have session: "
- << lastResult.sessionId,
+ << redact(entry.toBSON()) << " to have session: " << lastResult.sessionId,
lastResult.sessionId == sessionId);
uassert(40630,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString() << ": "
- << redact(entry.toBSON())
- << " to have txnNumber: "
- << lastResult.txnNum,
+ << redact(entry.toBSON()) << " to have txnNumber: " << lastResult.txnNum,
lastResult.txnNum == txnNum);
if (entry.getPreImageOpTime()) {
@@ -127,11 +121,8 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
} else {
uasserted(40631,
str::stream() << "expected oplog with opTime: " << entry.getOpTime().toString()
- << ": "
- << redact(entry.toBSON())
- << " to have either "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
+ << ": " << redact(entry.toBSON()) << " to have either "
+ << repl::OplogEntryBase::kPreImageOpTimeFieldName << " or "
<< repl::OplogEntryBase::kPostImageOpTimeFieldName);
}
@@ -152,20 +143,17 @@ repl::OplogEntry parseOplog(const BSONObj& oplogBSON) {
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have sessionId: "
- << redact(oplogBSON),
+ << " does not have sessionId: " << redact(oplogBSON),
sessionInfo.getSessionId());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have txnNumber: "
- << redact(oplogBSON),
+ << " does not have txnNumber: " << redact(oplogBSON),
sessionInfo.getTxnNumber());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have stmtId: "
- << redact(oplogBSON),
+ << " does not have stmtId: " << redact(oplogBSON),
oplogEntry.getStatementId());
return oplogEntry;
@@ -234,9 +222,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
uassert(40632,
str::stream() << "Can't handle 2 pre/post image oplog in a row. Prevoius oplog "
<< lastResult.oplogTime.getTimestamp().toString()
- << ", oplog ts: "
- << oplogEntry.getTimestamp().toString()
- << ": "
+ << ", oplog ts: " << oplogEntry.getTimestamp().toString() << ": "
<< oplogBSON,
!lastResult.isPrePostImage);
}
@@ -310,9 +296,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
const auto& oplogOpTime = result.oplogTime;
uassert(40633,
str::stream() << "Failed to create new oplog entry for oplog with opTime: "
- << oplogEntry.getOpTime().toString()
- << ": "
- << redact(oplogBSON),
+ << oplogEntry.getOpTime().toString() << ": " << redact(oplogBSON),
!oplogOpTime.isNull());
// Do not call onWriteOpCompletedOnPrimary if we inserted a pre/post image, because the
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 11efb9ad1d3..86f8a8a6cf6 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -259,8 +259,9 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
// Skip the rest of the chain for this session since the ns is unrelated to the
// current one being migrated. It is ok not to check the rest of the chain because
// retryable writes don't allow touching different namespaces.
- if (!nextStmtId || (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
- nextOplog->getNss() != _ns)) {
+ if (!nextStmtId ||
+ (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
+ nextOplog->getNss() != _ns)) {
_currentOplogIterator.reset();
return false;
}
@@ -419,8 +420,7 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte
uassert(40656,
str::stream() << "rollback detected, rollbackId was " << _initialRollbackId
- << " but is now "
- << rollbackId,
+ << " but is now " << rollbackId,
rollbackId == _initialRollbackId);
// If the rollbackId hasn't changed, and this record corresponds to a retryable write,
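
A side note on the condition reformatted above: once !nextStmtId has been ruled out by the ||, the inner "nextStmtId &&" test is always true and therefore redundant. A minimal sketch demonstrating the equivalence, with stand-in types rather than the real oplog iterator API:

    #include <cassert>
    #include <optional>

    // Stand-ins for the real types; the sentinel value mirrors the
    // kIncompleteHistoryStmtId used in the hunk above but is illustrative.
    constexpr int kIncompleteHistoryStmtId = -1;

    bool original(std::optional<int> nextStmtId, bool nssDiffers) {
        return !nextStmtId ||
            (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId && nssDiffers);
    }

    bool simplified(std::optional<int> nextStmtId, bool nssDiffers) {
        // After the short-circuit, nextStmtId is known to be engaged.
        return !nextStmtId ||
            (*nextStmtId != kIncompleteHistoryStmtId && nssDiffers);
    }

    int main() {
        for (auto id : {std::optional<int>{},
                        std::optional<int>{kIncompleteHistoryStmtId},
                        std::optional<int>{7}}) {
            for (bool differs : {false, true}) {
                assert(original(id, differs) == simplified(id, differs));
            }
        }
        return 0;
    }
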
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index dd03e31b206..10564146ca4 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -164,8 +164,7 @@ public:
const auto storedShardName = shardingState->shardId().toString();
uassert(ErrorCodes::BadValue,
str::stream() << "received shardName " << shardName
- << " which differs from stored shardName "
- << storedShardName,
+ << " which differs from stored shardName " << storedShardName,
storedShardName == shardName);
// Validate config connection string parameter.
@@ -184,8 +183,7 @@ public:
Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Given config server set name: " << givenConnStr.getSetName()
- << " differs from known set name: "
- << storedConnStr.getSetName(),
+ << " differs from known set name: " << storedConnStr.getSetName(),
givenConnStr.getSetName() == storedConnStr.getSetName());
// Validate namespace parameter.
@@ -366,11 +364,11 @@ public:
if (!status.isOK()) {
// The reload itself was interrupted or confused here
- errmsg = str::stream() << "could not refresh metadata for " << nss.ns()
- << " with requested shard version "
- << requestedVersion.toString()
- << ", stored shard version is " << currVersion.toString()
- << causedBy(redact(status));
+ errmsg = str::stream()
+ << "could not refresh metadata for " << nss.ns()
+ << " with requested shard version " << requestedVersion.toString()
+ << ", stored shard version is " << currVersion.toString()
+ << causedBy(redact(status));
warning() << errmsg;
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 3776b6e89e9..0c25b399c43 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -101,8 +101,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
maxCollVersion.incMajor();
BSONObj shardChunk =
BSON(ChunkType::minShardID(mins[i])
- << ChunkType::max(maxs[i])
- << ChunkType::shard(kShardId.toString())
+ << ChunkType::max(maxs[i]) << ChunkType::shard(kShardId.toString())
<< ChunkType::lastmod(Date_t::fromMillisSinceEpoch(maxCollVersion.toLong())));
chunks.push_back(
@@ -142,8 +141,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
try {
DBDirectClient client(operationContext());
for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID() << chunk.getMin() << ChunkType::max()
- << chunk.getMax()));
+ Query query(BSON(ChunkType::minShardID()
+ << chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index e1a9cb39cd9..142b7d3e69f 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -158,9 +158,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
}
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read persisted collections entry for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithCollection.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithCollection.getStatus().toString()
<< "'.",
statusWithCollection.isOK());
@@ -173,9 +171,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
statusWithCollection.getValue().getEpoch());
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read highest version persisted chunk for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithChunk.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithChunk.getStatus().toString()
<< "'.",
statusWithChunk.isOK());
@@ -263,8 +259,8 @@ StatusWith<CollectionAndChangedChunks> getIncompletePersistedMetadataSinceVersio
return CollectionAndChangedChunks();
}
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to load local metadata due to '" << status.toString()
- << "'.");
+ str::stream()
+ << "Failed to load local metadata due to '" << status.toString() << "'.");
}
}
@@ -437,8 +433,8 @@ void ShardServerCatalogCacheLoader::getDatabase(
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
- _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary, term ](
- auto status) noexcept {
+ _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary,
+ term ](auto status) noexcept {
invariant(status);
auto context = _contexts.makeOperationContext(*Client::getCurrent());
@@ -611,19 +607,18 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}();
auto remoteRefreshFn = [this, nss, catalogCacheSinceVersion, maxLoaderVersion, termScheduled](
- OperationContext* opCtx,
- StatusWith<CollectionAndChangedChunks>
- swCollectionAndChangedChunks) -> StatusWith<CollectionAndChangedChunks> {
-
+ OperationContext* opCtx,
+ StatusWith<CollectionAndChangedChunks> swCollectionAndChangedChunks)
+ -> StatusWith<CollectionAndChangedChunks> {
if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleCollAndChunksTask(
opCtx,
nss,
collAndChunkTask{swCollectionAndChangedChunks, maxLoaderVersion, termScheduled});
- LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
- << " from version " << maxLoaderVersion
- << " and no metadata was found.";
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader remotely refreshed for collection " << nss << " from version "
+ << maxLoaderVersion << " and no metadata was found.";
return swCollectionAndChangedChunks;
}
@@ -634,12 +629,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
auto& collAndChunks = swCollectionAndChangedChunks.getValue();
if (collAndChunks.changedChunks.back().getVersion().epoch() != collAndChunks.epoch) {
- return Status{
- ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Invalid chunks found when reloading '" << nss.toString()
+ return Status{ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Invalid chunks found when reloading '" << nss.toString()
<< "' Previous collection epoch was '"
- << collAndChunks.epoch.toString()
- << "', but found a new epoch '"
+ << collAndChunks.epoch.toString() << "', but found a new epoch '"
<< collAndChunks.changedChunks.back().getVersion().epoch().toString()
<< "'. Collection was dropped and recreated."};
}
@@ -716,8 +710,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
StringData dbName,
long long termScheduled,
stdx::function<void(OperationContext*, StatusWith<DatabaseType>)> callbackFn) {
- auto remoteRefreshFn = [ this, name = dbName.toString(), termScheduled ](
- OperationContext * opCtx, StatusWith<DatabaseType> swDatabaseType) {
+ auto remoteRefreshFn = [this, name = dbName.toString(), termScheduled](
+ OperationContext* opCtx, StatusWith<DatabaseType> swDatabaseType) {
if (swDatabaseType == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleDbTask(
opCtx, name, DBTask{swDatabaseType, termScheduled});
@@ -777,11 +771,12 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
: ("enqueued metadata from " +
enqueued.changedChunks.front().getVersion().toString() + " to " +
enqueued.changedChunks.back().getVersion().toString()))
- << " and " << (persisted.changedChunks.empty()
- ? "no persisted metadata"
- : ("persisted metadata from " +
- persisted.changedChunks.front().getVersion().toString() + " to " +
- persisted.changedChunks.back().getVersion().toString()))
+ << " and "
+ << (persisted.changedChunks.empty()
+ ? "no persisted metadata"
+ : ("persisted metadata from " +
+ persisted.changedChunks.front().getVersion().toString() + " to " +
+ persisted.changedChunks.back().getVersion().toString()))
<< ", GTE cache version " << catalogCacheSinceVersion;
if (!tasksAreEnqueued) {
@@ -892,7 +887,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
return;
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
invariant(status);
_runDbTasks(name);
@@ -979,7 +974,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
LOG(0) << "Cache loader failed to schedule a persisted metadata update"
<< " task for namespace '" << name << "' due to '" << redact(status)
@@ -1026,12 +1021,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
uassertStatusOKWithContext(
persistCollectionAndChangedChunks(opCtx, nss, task.collectionAndChangedChunks.get()),
str::stream() << "Failed to update the persisted chunk metadata for collection '"
- << nss.ns()
- << "' from '"
- << task.minQueryVersion.toString()
- << "' to '"
- << task.maxQueryVersion.toString()
- << "'. Will be retried.");
+ << nss.ns() << "' from '" << task.minQueryVersion.toString() << "' to '"
+ << task.maxQueryVersion.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted chunk metadata for collection '"
<< nss << "' from '" << task.minQueryVersion
@@ -1057,15 +1048,13 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
// The database was dropped. The persisted metadata for the collection must be cleared.
uassertStatusOKWithContext(deleteDatabasesEntry(opCtx, dbName),
str::stream() << "Failed to clear persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
return;
}
uassertStatusOKWithContext(persistDbVersion(opCtx, *task.dbType),
str::stream() << "Failed to update the persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted metadata for db "
<< dbName.toString();
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 43653b40a2a..980a4be7865 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -59,8 +59,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
/**
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index e95c954e10d..0abb64e96cc 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -98,36 +98,36 @@ public:
// Update the shard identity config string
void onConfirmedSet(const State& state) final {
- Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor()->schedule([
- serviceContext = _serviceContext,
- connStr = state.connStr
- ](Status status) {
- if (ErrorCodes::isCancelationError(status.code())) {
- LOG(2) << "Unable to schedule confirmed set update due to " << status;
- return;
- }
- uassertStatusOK(status);
-
- LOG(0) << "Updating config server with confirmed set " << connStr;
- Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
-
- if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
- return;
- }
-
- auto configsvrConnStr =
- Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
-
- // Only proceed if the notification is for the configsvr
- if (configsvrConnStr.getSetName() != connStr.getSetName()) {
- return;
- }
-
- ThreadClient tc("updateShardIdentityConfigString", serviceContext);
- auto opCtx = tc->makeOperationContext();
-
- ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
- });
+ Grid::get(_serviceContext)
+ ->getExecutorPool()
+ ->getFixedExecutor()
+ ->schedule([serviceContext = _serviceContext, connStr = state.connStr](Status status) {
+ if (ErrorCodes::isCancelationError(status.code())) {
+ LOG(2) << "Unable to schedule confirmed set update due to " << status;
+ return;
+ }
+ uassertStatusOK(status);
+
+ LOG(0) << "Updating config server with confirmed set " << connStr;
+ Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
+
+ if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
+ return;
+ }
+
+ auto configsvrConnStr =
+ Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
+
+ // Only proceed if the notification is for the configsvr
+ if (configsvrConnStr.getSetName() != connStr.getSetName()) {
+ return;
+ }
+
+ ThreadClient tc("updateShardIdentityConfigString", serviceContext);
+ auto opCtx = tc->makeOperationContext();
+
+ ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
+ });
}
void onPossibleSet(const State& state) final {
Grid::get(_serviceContext)->shardRegistry()->updateReplSetHosts(state.connStr);
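
Several hunks here reformat lambdas with C++14 init-captures (e.g. serviceContext = _serviceContext), which the older clang-format wrapped with extra spaces inside the brackets. A small self-contained sketch of the capture style, with illustrative names only:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::function<void()>> scheduled;

        std::string connStr = "configRS/cfg1:27019,cfg2:27019";
        // An init-capture moves (or copies) the value into the closure, so the
        // queued task stays valid even after the enclosing scope has gone away.
        scheduled.push_back([connStr = std::move(connStr)] {
            std::cout << "Updating config server with confirmed set " << connStr
                      << '\n';
        });

        for (auto& task : scheduled)
            task();  // run the queued work, as an executor's schedule() would
        return 0;
    }
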
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index d124b98cc21..bb8bc7abc58 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -183,18 +183,19 @@ TEST_F(ShardingInitializationMongoDTest, InitWhilePreviouslyInErrorStateWillStay
shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
+ shardingInitialization()->setGlobalInitMethodForTest([](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) {
uasserted(ErrorCodes::ShutdownInProgress, "Not an actual shutdown");
});
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity);
// ShardingState is now in error state, attempting to call it again will still result in error.
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
ASSERT_THROWS_CODE(
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity),
@@ -223,10 +224,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingShardIdentit
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -256,10 +257,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameS
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -291,13 +292,9 @@ TEST_F(ShardingInitializationMongoDTest,
storageGlobalParams.readOnly = true;
serverGlobalParams.overrideShardIdentity =
BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
- << OID::gen()
- << ShardIdentity::kConfigsvrConnectionStringFieldName
- << "invalid");
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName << kShardName
+ << ShardIdentity::kClusterIdFieldName << OID::gen()
+ << ShardIdentity::kConfigsvrConnectionStringFieldName << "invalid");
ASSERT_THROWS_CODE(
shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()),
@@ -436,10 +433,8 @@ TEST_F(ShardingInitializationMongoDTest,
ScopedSetStandaloneMode standalone(getServiceContext());
BSONObj invalidShardIdentity = BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName
+ << kShardName << ShardIdentity::kClusterIdFieldName
<< OID::gen()
<< ShardIdentity::kConfigsvrConnectionStringFieldName
<< "invalid");
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 3529a42cfbd..c3d07903ceb 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -121,10 +121,10 @@ Status ShardingLogging::_log(OperationContext* opCtx,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
Date_t now = Grid::get(opCtx)->getNetwork()->now();
- const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
- << ":" << serverGlobalParams.port;
- const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
- << OID::gen();
+ const std::string serverName = str::stream()
+ << Grid::get(opCtx)->getNetwork()->getHostName() << ":" << serverGlobalParams.port;
+ const std::string changeId = str::stream()
+ << serverName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
@@ -162,9 +162,9 @@ Status ShardingLogging::_createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
- BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj createCmd =
+ BSON("create" << collName << "capped" << true << "size" << cappedSize
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto result =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index e59ed3568f7..e229badedbc 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -124,8 +124,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -229,9 +228,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -249,8 +246,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -336,9 +332,7 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
BSONElement tagMaxKeyElement = tagMaxFields.next();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the min and max of the existing zone " << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
- << " have non-matching keys",
+ << " -->> " << tag.getMaxKey() << " have non-matching keys",
tagMinKeyElement.fieldNameStringData() ==
tagMaxKeyElement.fieldNameStringData());
@@ -350,20 +344,15 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the proposed shard key " << proposedKey.toString()
<< " does not match with the shard key of the existing zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey(),
+ << tag.getMinKey() << " -->> " << tag.getMaxKey(),
match);
if (ShardKeyPattern::isHashedPatternEl(proposedKeyElement) &&
(tagMinKeyElement.type() != NumberLong || tagMaxKeyElement.type() != NumberLong)) {
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "cannot do hash sharding with the proposed key "
- << proposedKey.toString()
- << " because there exists a zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
+ << proposedKey.toString() << " because there exists a zone "
+ << tag.getMinKey() << " -->> " << tag.getMaxKey()
<< " whose boundaries are not "
"of type NumberLong");
}
@@ -418,8 +407,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx, const Nam
uassert(ErrorCodes::InternalError,
str::stream() << "expected to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -503,8 +491,7 @@ ShardCollectionTargetState calculateTargetState(OperationContext* opCtx,
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
@@ -704,17 +691,21 @@ UUID shardCollection(OperationContext* opCtx,
InitialSplitPolicy::ShardCollectionConfig initialChunks;
boost::optional<ShardCollectionTargetState> targetState;
- auto writeChunkDocumentsAndRefreshShards = [&](
- const ShardCollectionTargetState& targetState,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
- // Insert chunk documents to config.chunks on the config server.
- writeFirstChunksToConfig(opCtx, initialChunks);
-
- updateShardingCatalogEntryForCollection(
- opCtx, nss, targetState, initialChunks, *request.getCollation(), request.getUnique());
-
- refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
- };
+ auto writeChunkDocumentsAndRefreshShards =
+ [&](const ShardCollectionTargetState& targetState,
+ const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
+ // Insert chunk documents to config.chunks on the config server.
+ writeFirstChunksToConfig(opCtx, initialChunks);
+
+ updateShardingCatalogEntryForCollection(opCtx,
+ nss,
+ targetState,
+ initialChunks,
+ *request.getCollation(),
+ request.getUnique());
+
+ refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
+ };
{
// From this point onward the collection can only be read, not written to, so it is safe to
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index c76edbf8c6d..3a6a2d3a577 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -137,15 +137,14 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
//
// TODO(SERVER-25086): Remove distLock acquisition from split chunk
//
- const std::string whyMessage(
- str::stream() << "splitting chunk " << chunkRange.toString() << " in " << nss.toString());
+ const std::string whyMessage(str::stream() << "splitting chunk " << chunkRange.toString()
+ << " in " << nss.toString());
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus().withContext(
str::stream() << "could not acquire collection lock for " << nss.toString()
- << " to split chunk "
- << chunkRange.toString());
+ << " to split chunk " << chunkRange.toString());
}
// If the shard key is hashed, then we must make sure that the split points are of type
@@ -157,12 +156,11 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
BSONElement splitKeyElement = it.next();
if (splitKeyElement.type() != NumberLong) {
return {ErrorCodes::CannotSplit,
- str::stream() << "splitChunk cannot split chunk "
- << chunkRange.toString()
- << ", split point "
- << splitKeyElement.toString()
- << " must be of type "
- "NumberLong for hashed shard key patterns"};
+ str::stream()
+ << "splitChunk cannot split chunk " << chunkRange.toString()
+ << ", split point " << splitKeyElement.toString()
+ << " must be of type "
+ "NumberLong for hashed shard key patterns"};
}
}
}
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 95815c552d9..06953ff88ea 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -291,13 +291,13 @@ TransactionCoordinator::TransactionCoordinator(ServiceContext* serviceContext,
return txn::deleteCoordinatorDoc(*_scheduler, _lsid, _txnNumber);
})
- .onCompletion([ this, deadlineFuture = std::move(deadlineFuture) ](Status s) mutable {
+ .onCompletion([this, deadlineFuture = std::move(deadlineFuture)](Status s) mutable {
// Interrupt this coordinator's scheduler hierarchy and join the deadline task's future
// in order to guarantee that there are no more threads running within the coordinator.
_scheduler->shutdown(
{ErrorCodes::TransactionCoordinatorDeadlineTaskCanceled, "Coordinator completed"});
- return std::move(deadlineFuture).onCompletion([ this, s = std::move(s) ](Status) {
+ return std::move(deadlineFuture).onCompletion([this, s = std::move(s)](Status) {
// Notify all the listeners which are interested in the coordinator's lifecycle.
// After this call, the coordinator object could potentially get destroyed by its
// lifetime controller, so there shouldn't be any accesses to `this` after this
@@ -373,8 +373,7 @@ void TransactionCoordinator::_done(Status status) {
if (status == ErrorCodes::TransactionCoordinatorSteppingDown)
status = Status(ErrorCodes::InterruptedDueToReplStateChange,
str::stream() << "Coordinator " << _lsid.getId() << ':' << _txnNumber
- << " stopped due to: "
- << status.reason());
+ << " stopped due to: " << status.reason());
LOG(3) << "Two-phase commit for " << _lsid.getId() << ':' << _txnNumber << " completed with "
<< redact(status);
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index b45b4449838..6fa5d45226e 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -61,8 +61,8 @@ void TransactionCoordinatorCatalog::onStepDown() {
stdx::unique_lock<stdx::mutex> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
- for (auto && [ sessionId, coordinatorsForSession ] : _coordinatorsBySession) {
- for (auto && [ txnNumber, coordinator ] : coordinatorsForSession) {
+ for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
+ for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
coordinatorsToCancel.emplace_back(coordinator);
}
}
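
The catalog hunk above only tightens the spacing clang-format puts around C++17 structured bindings. A minimal sketch of iterating a nested map that way, with illustrative names:

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        // sessionId -> (txnNumber -> coordinator name); names are illustrative.
        std::map<std::string, std::map<int, std::string>> coordinatorsBySession{
            {"sessionA", {{1, "coordA1"}, {2, "coordA2"}}},
            {"sessionB", {{5, "coordB5"}}}};

        for (auto&& [sessionId, coordinatorsForSession] : coordinatorsBySession) {
            for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
                std::cout << sessionId << ':' << txnNumber << " -> "
                          << coordinator << '\n';
            }
        }
        return 0;
    }
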
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 0af8b465353..58766aa1d28 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -83,8 +83,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
- opCtx) {
+ return scheduleWork([this, shardId, commandObj = commandObj.getOwned()](
+ OperationContext* opCtx) {
// Note: This internal authorization is tied to the lifetime of the client, which will
// be destroyed by 'scheduleWork' immediately after this lambda ends
AuthorizationSession::get(opCtx->getClient())
@@ -114,8 +114,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
}
return _targetHostAsync(shardId, readPref)
- .then([ this, shardId, commandObj = commandObj.getOwned(), readPref ](
- HostAndShard hostAndShard) mutable {
+ .then([this, shardId, commandObj = commandObj.getOwned(), readPref](
+ HostAndShard hostAndShard) mutable {
executor::RemoteCommandRequest request(hostAndShard.hostTargeted,
NamespaceString::kAdminDb.toString(),
commandObj,
@@ -166,7 +166,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusWith<ResponseStatus> s) {
+ [this, it = std::move(it)](StatusWith<ResponseStatus> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index 1c654d8707f..7aef1fc8e78 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -115,7 +115,7 @@ public:
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusOrStatusWith<ReturnType> s) {
+ [this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
@@ -284,7 +284,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
combiner(std::move(combiner)) {}
/*****************************************************
* The first few fields have fixed values. *
- ******************************************************/
+ ******************************************************/
// Protects all state in the SharedBlock.
stdx::mutex mutex;
@@ -299,7 +299,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
/*****************************************************
* The below have initial values based on user input.*
- ******************************************************/
+ ******************************************************/
// The number of input futures that have not yet been resolved and processed.
size_t numOutstandingResponses;
// The variable where the intermediate results and final result is stored.
@@ -374,26 +374,25 @@ Future<FutureContinuationResult<LoopBodyFn>> doWhile(AsyncWorkScheduler& schedul
LoopBodyFn&& f) {
using ReturnType = typename decltype(f())::value_type;
auto future = f();
- return std::move(future).onCompletion([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
- f = std::forward<LoopBodyFn>(f)
- ](StatusOrStatusWith<ReturnType> s) mutable {
- if (!shouldRetryFn(s))
- return Future<ReturnType>(std::move(s));
-
- // Retry after a delay.
- const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
- return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {}).then([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::move(shouldRetryFn),
- f = std::move(f)
- ]() mutable {
- return doWhile(scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ return std::move(future).onCompletion(
+ [&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
+ f = std::forward<LoopBodyFn>(f)](StatusOrStatusWith<ReturnType> s) mutable {
+ if (!shouldRetryFn(s))
+ return Future<ReturnType>(std::move(s));
+
+ // Retry after a delay.
+ const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
+ return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {})
+ .then([&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::move(shouldRetryFn),
+ f = std::move(f)]() mutable {
+ return doWhile(
+ scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ });
});
- });
}
} // namespace txn
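
The doWhile reformatted above is an asynchronous retry loop: run the body, consult shouldRetryFn, wait out a backoff, and recurse. A simplified synchronous sketch under that assumption (the real version chains futures on a scheduler; names and backoff policy here are illustrative):

    #include <chrono>
    #include <functional>
    #include <iostream>
    #include <thread>

    // Synchronous approximation of the future-chaining doWhile above: keeps
    // invoking 'body' until 'shouldRetry' says stop, sleeping an increasing
    // backoff between attempts.
    template <typename Result>
    Result doWhileSketch(std::function<bool(const Result&)> shouldRetry,
                         std::function<Result()> body) {
        auto delay = std::chrono::milliseconds(10);
        while (true) {
            Result r = body();
            if (!shouldRetry(r))
                return r;
            std::this_thread::sleep_for(delay);
            delay *= 2;  // crude exponential backoff
        }
    }

    int main() {
        int attempts = 0;
        int result = doWhileSketch<int>(
            [](const int& r) { return r < 3; },  // retry until body returns 3
            [&] { return ++attempts; });         // body: count invocations
        std::cout << "finished after " << result << " attempts\n";
        return 0;
    }
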
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index bed0927a650..e3df22d9b7c 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -359,7 +359,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkSucceeds) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
return future.get(opCtx);
});
@@ -377,7 +377,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkThrowsException) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
future.get(opCtx);
uasserted(ErrorCodes::InternalError, "Test error");
@@ -396,7 +396,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkInSucceeds) {
auto pf = makePromiseFuture<int>();
auto future = async.scheduleWorkIn(
Milliseconds{10},
- [future = std::move(pf.future)](OperationContext * opCtx) { return future.get(opCtx); });
+ [future = std::move(pf.future)](OperationContext* opCtx) { return future.get(opCtx); });
pf.promise.emplaceValue(5);
ASSERT(!future.isReady());
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index dac4caee608..6be674d1ad7 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -147,7 +147,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
_catalogAndScheduler->scheduler
.scheduleWorkIn(
recoveryDelayForTesting,
- [catalogAndScheduler = _catalogAndScheduler](OperationContext * opCtx) {
+ [catalogAndScheduler = _catalogAndScheduler](OperationContext* opCtx) {
auto& replClientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
replClientInfo.setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
index f29b442559b..df1d3cc2ade 100644
--- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
@@ -44,8 +44,7 @@ TEST(CoordinatorCommitDecisionTest, SerializeCommitHasTimestampAndNoAbortStatus)
ASSERT_BSONOBJ_EQ(BSON("decision"
<< "commit"
- << "commitTimestamp"
- << Timestamp(100, 200)),
+ << "commitTimestamp" << Timestamp(100, 200)),
obj);
}
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index cbed3eb021a..ad4554b406b 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -192,8 +192,7 @@ auto makeDummyPrepareCommand(const LogicalSessionId& lsid, const TxnNumber& txnN
prepareCmd.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareCmd.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
return prepareObj;
@@ -546,17 +545,23 @@ protected:
TxnNumber txnNumber,
const std::vector<ShardId>& participants,
const boost::optional<Timestamp>& commitTimestamp) {
- txn::persistDecision(*_aws, lsid, txnNumber, participants, [&] {
- txn::CoordinatorCommitDecision decision;
- if (commitTimestamp) {
- decision.setDecision(txn::CommitDecision::kCommit);
- decision.setCommitTimestamp(commitTimestamp);
- } else {
- decision.setDecision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort status"));
- }
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ lsid,
+ txnNumber,
+ participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision;
+ if (commitTimestamp) {
+ decision.setDecision(txn::CommitDecision::kCommit);
+ decision.setCommitTimestamp(commitTimestamp);
+ } else {
+ decision.setDecision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction,
+ "Test abort status"));
+ }
+ return decision;
+ }())
+ .get();
auto allCoordinatorDocs = txn::readAllCoordinatorDocs(opCtx);
ASSERT_EQUALS(allCoordinatorDocs.size(), size_t(1));
@@ -733,11 +738,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest,
// Delete the document for the first transaction and check that only the second transaction's
// document still exists.
- txn::persistDecision(*_aws, _lsid, txnNumber1, _participants, [&] {
- txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ _lsid,
+ txnNumber1,
+ _participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(
+ Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
+ return decision;
+ }())
+ .get();
txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumber1).get();
allCoordinatorDocs = txn::readAllCoordinatorDocs(operationContext());
@@ -1466,8 +1477,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForParticipantListWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
coordinator.runCommit(kTwoShardIdList);
waitUntilCoordinatorDocIsPresent();
@@ -1511,8 +1521,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForDecisionWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second prepare request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertPrepareSentAndRespondWithSuccess();
@@ -1562,8 +1571,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangAfterDeletingCoordinatorDoc",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second commit request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertCommitSentAndRespondWithSuccess();
@@ -2122,11 +2130,10 @@ TEST_F(TransactionCoordinatorMetricsTest, SlowLogLineIncludesTransactionParamete
runSimpleTwoPhaseCommitWithCommitDecisionAndCaptureLogLines();
BSONObjBuilder lsidBob;
_lsid.serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << _txnNumber));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(str::stream()
+ << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << _txnNumber));
}
TEST_F(TransactionCoordinatorMetricsTest,
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index f49da0ac61f..dbffc60de1d 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -126,8 +126,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
BSONObj sameParticipantList =
BSON("$and" << buildParticipantListMatchesConditions(participantList));
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$or"
+ << sessionInfo.toBSON() << "$or"
<< BSON_ARRAY(noParticipantList << sameParticipantList)));
// Update with participant list.
@@ -154,13 +153,9 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
- << buildParticipantListString(participantList)
- << " for "
- << lsid.getId()
- << ':'
- << txnNumber
- << ", found document with a different participant list: "
- << doc);
+ << buildParticipantListString(participantList) << " for "
+ << lsid.getId() << ':' << txnNumber
+ << ", found document with a different participant list: " << doc);
}
// Throw any other error.
@@ -223,8 +218,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
prepareTransaction.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<PrepareResponse>> responses;
@@ -245,7 +239,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
// Initial value
PrepareVoteConsensus{int(participants.size())},
// Aggregates an incoming response (next) with the existing aggregate value (result)
- [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus & result,
+ [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus& result,
const PrepareResponse& next) {
result.registerVote(next);
@@ -300,10 +294,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
BSON(TransactionCoordinatorDocument::kDecisionFieldName << decision.toBSON());
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$and"
- << buildParticipantListMatchesConditions(participantList)
- << "$or"
+ << sessionInfo.toBSON() << "$and"
+ << buildParticipantListMatchesConditions(participantList) << "$or"
<< BSON_ARRAY(noDecision << sameDecision)));
entry.setU([&] {
@@ -333,11 +325,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
- << (isCommit ? "'commit'" : "'abort'")
- << " for"
- << lsid.getId()
- << ':'
- << txnNumber
+ << (isCommit ? "'commit'" : "'abort'") << " for " << lsid.getId()
+ << ':' << txnNumber
<< ", either failed to find document for this lsid:txnNumber or "
"document existed with a different participant list, decision "
"or commitTimestamp: "
@@ -379,8 +368,7 @@ Future<void> sendCommit(ServiceContext* service,
commitTransaction.setCommitTimestamp(commitTimestamp);
auto commitObj = commitTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -398,8 +386,7 @@ Future<void> sendAbort(ServiceContext* service,
abortTransaction.setDbName(NamespaceString::kAdminDb);
auto abortObj = abortTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -529,12 +516,12 @@ Future<PrepareResponse> sendPrepareToShard(ServiceContext* service,
swPrepareResponse != ErrorCodes::TransactionCoordinatorSteppingDown &&
swPrepareResponse != ErrorCodes::TransactionCoordinatorReachedAbortDecision;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? " local " : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
@@ -621,12 +608,12 @@ Future<void> sendDecisionToShard(ServiceContext* service,
// coordinator-specific code.
return !s.isOK() && s != ErrorCodes::TransactionCoordinatorSteppingDown;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? "local" : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
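The capture-list changes in this file are whitespace-only: clang-format now writes generalized lambda captures as `[x = y]` with no inner padding. The idiom itself is worth noting; a minimal sketch of the owned-capture pattern these continuations rely on (names are illustrative):

    // getOwned() copies the BSONObj into its own buffer, so the closure
    // stays valid after the caller's stack frame is gone.
    BSONObj commandObj = BSON("ping" << 1);
    auto continuation = [commandObj = commandObj.getOwned()](Status status) {
        if (status.isOK()) {
            // commandObj is safe to read here
        }
    };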
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index 3cb6b8c1cbe..b48811ec994 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -132,12 +132,11 @@ public:
replClient.setLastOp(opCtx, prepareOpTime);
}
- invariant(opCtx->recoveryUnit()->getPrepareTimestamp() ==
- prepareOpTime.getTimestamp(),
- str::stream() << "recovery unit prepareTimestamp: "
- << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
- << " participant prepareOpTime: "
- << prepareOpTime.toString());
+ invariant(
+ opCtx->recoveryUnit()->getPrepareTimestamp() == prepareOpTime.getTimestamp(),
+ str::stream() << "recovery unit prepareTimestamp: "
+ << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
+ << " participant prepareOpTime: " << prepareOpTime.toString());
if (MONGO_FAIL_POINT(
participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
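The invariant hunk is again pure reflow. For orientation: the two-argument form attaches a diagnostic that is only rendered when the condition fails, so the str::stream chain costs nothing on the passing path. A sketch with placeholder values:

    Timestamp expected(100, 1);  // placeholder values for illustration
    Timestamp actual(100, 1);
    invariant(actual == expected,
              str::stream() << "expected timestamp: " << expected.toString()
                            << " actual timestamp: " << actual.toString());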
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 56c2ca059de..b4999f5c6eb 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -46,9 +46,7 @@ TEST(ShardIdentityType, RoundTrip) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId
- << "configsvrConnectionString"
+ << "clusterId" << clusterId << "configsvrConnectionString"
<< "test/a:123");
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
@@ -67,8 +65,7 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,8 +76,7 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -91,8 +87,7 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -118,8 +113,7 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -133,8 +127,7 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -147,5 +140,5 @@ TEST(ShardIdentityType, CreateUpdateObject) {
ASSERT_BSONOBJ_EQ(expectedObj, updateObj);
}
+} // namespace
} // namespace mongo
-} // unnamed namespace
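Closing-brace comments are normalized as well: ad-hoc forms like `// unnamed namespace` become clang-format's canonical `// namespace <name>`, with the namespaces closing innermost-first. The resulting shape:

    namespace mongo {
    namespace {
    // file-local test helpers live here
    }  // namespace
    }  // namespace mongo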
diff --git a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
index 1ff67ff3257..d1ceaaeeba6 100644
--- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
+++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
@@ -90,5 +90,5 @@ MONGO_INITIALIZER(RegisterWaitForOngoingChunkSplitsCommand)(InitializerContext*
}
return Status::OK();
}
-}
-}
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 14237da05e2..641f7f8fa9c 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -130,23 +130,23 @@ struct ServerGlobalParams {
enum ClusterAuthModes {
ClusterAuthMode_undefined,
/**
- * Authenticate using keyfile, accept only keyfiles
- */
+ * Authenticate using keyfile, accept only keyfiles
+ */
ClusterAuthMode_keyFile,
/**
- * Authenticate using keyfile, accept both keyfiles and X.509
- */
+ * Authenticate using keyfile, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendKeyFile,
/**
- * Authenticate using X.509, accept both keyfiles and X.509
- */
+ * Authenticate using X.509, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendX509,
/**
- * Authenticate using X.509, accept only X.509
- */
+ * Authenticate using X.509, accept only X.509
+ */
ClusterAuthMode_x509
};
@@ -271,4 +271,4 @@ struct TraitNamedDomain {
return ret;
}
};
-}
+} // namespace mongo
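In server_options.h only the comment indentation moves; the enumerators are untouched. Per the comments, the two "send" modes are transitional: they authenticate with one mechanism while accepting both. A hypothetical predicate built on that reading:

    // Illustrative only: derives "accepts X.509" from the documented
    // semantics of each mode.
    bool acceptsX509(ServerGlobalParams::ClusterAuthModes mode) {
        return mode == ServerGlobalParams::ClusterAuthMode_sendKeyFile ||
               mode == ServerGlobalParams::ClusterAuthMode_sendX509 ||
               mode == ServerGlobalParams::ClusterAuthMode_x509;
    }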
diff --git a/src/mongo/db/server_options_helpers.h b/src/mongo/db/server_options_helpers.h
index da7098f23c4..a79dde98b39 100644
--- a/src/mongo/db/server_options_helpers.h
+++ b/src/mongo/db/server_options_helpers.h
@@ -43,18 +43,18 @@ class Environment;
namespace moe = mongo::optionenvironment;
/**
-* Handle custom validation of base options that can not currently be done by using
-* Constraints in the Environment. See the "validate" function in the Environment class for
-* more details.
-*/
+ * Handle custom validation of base options that can not currently be done by using
+ * Constraints in the Environment. See the "validate" function in the Environment class for
+ * more details.
+ */
Status validateBaseOptions(const moe::Environment& params);
/**
-* Canonicalize base options for the given environment.
-*
-* For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
-* merged into "net.wireObjectCheck".
-*/
+ * Canonicalize base options for the given environment.
+ *
+ * For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
+ * merged into "net.wireObjectCheck".
+ */
Status canonicalizeBaseOptions(moe::Environment* params);
/**
@@ -67,11 +67,11 @@ Status canonicalizeBaseOptions(moe::Environment* params);
Status setupBaseOptions(const std::vector<std::string>& args);
/**
-* Store the given parsed params in global server state.
-*
-* For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
-* parameter.
-*/
+ * Store the given parsed params in global server state.
+ *
+ * For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
+ * parameter.
+ */
Status storeBaseOptions(const moe::Environment& params);
} // namespace mongo
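The reflowed comments describe a validate / canonicalize / store pipeline over a parsed option Environment. A sketch of how a caller might chain the three stages (hypothetical wiring; assumes moe::Environment is copyable):

    Status applyBaseOptions(const moe::Environment& params) {
        moe::Environment env = params;  // canonicalization mutates in place
        Status s = validateBaseOptions(env);
        if (!s.isOK())
            return s;
        s = canonicalizeBaseOptions(&env);
        if (!s.isOK())
            return s;
        return storeBaseOptions(env);
    }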
diff --git a/src/mongo/db/service_context_test_fixture.h b/src/mongo/db/service_context_test_fixture.h
index edbd5021816..e7508898c0f 100644
--- a/src/mongo/db/service_context_test_fixture.h
+++ b/src/mongo/db/service_context_test_fixture.h
@@ -39,9 +39,9 @@ namespace mongo {
class ScopedGlobalServiceContextForTest {
public:
/**
- * Returns a service context, which is only valid for this instance of the test.
- * Must not be called before setUp or after tearDown.
- */
+ * Returns a service context, which is only valid for this instance of the test.
+ * Must not be called before setUp or after tearDown.
+ */
ServiceContext* getServiceContext();
protected:
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 849a6744128..349d25326f4 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -123,9 +123,10 @@ void generateLegacyQueryErrorResponse(const AssertionException& exception,
curop->debug().errInfo = exception.toStatus();
log(LogComponent::kQuery) << "assertion " << exception.toString() << " ns:" << queryMessage.ns
- << " query:" << (queryMessage.query.valid(BSONVersion::kLatest)
- ? redact(queryMessage.query)
- : "query object is corrupt");
+ << " query:"
+ << (queryMessage.query.valid(BSONVersion::kLatest)
+ ? redact(queryMessage.query)
+ : "query object is corrupt");
if (queryMessage.ntoskip || queryMessage.ntoreturn) {
log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
<< " ntoreturn:" << queryMessage.ntoreturn;
@@ -971,8 +972,8 @@ DbResponse receivedCommands(OperationContext* opCtx,
// However, the complete command object will still be echoed to the client.
if (!(c = CommandHelpers::findCommand(request.getCommandName()))) {
globalCommandRegistry()->incrementUnknownCommands();
- std::string msg = str::stream() << "no such command: '" << request.getCommandName()
- << "'";
+ std::string msg = str::stream()
+ << "no such command: '" << request.getCommandName() << "'";
LOG(2) << msg;
uasserted(ErrorCodes::CommandNotFound, str::stream() << msg);
}
@@ -1008,12 +1009,10 @@ DbResponse receivedCommands(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << request.getCommandName()
- << "' operation on '"
- << request.getDatabase()
- << "' database via "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << request.getCommandName()
+ << "' operation on '" << request.getDatabase() << "' database via "
+ << "fire-and-forget command execution.");
}
return {}; // Don't reply.
}
@@ -1302,10 +1301,8 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (!opCtx->getClient()->isInDirectClient()) {
uassert(18663,
str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: "
- << nsString.ns()
- << ", op: "
- << networkOpToString(op),
+ << "versioned connections, ns: " << nsString.ns()
+ << ", op: " << networkOpToString(op),
!ShardedConnectionInfo::get(&c, false));
}
@@ -1333,12 +1330,10 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterLegacyUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << networkOpToString(op)
- << "' operation on '"
- << nsString
- << "' namespace via legacy "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << networkOpToString(op)
+ << "' operation on '" << nsString << "' namespace via legacy "
+ << "fire-and-forget command execution.");
}
}
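Both not-master hunks reflow uasserted() calls. uasserted() unconditionally throws a DBException carrying the code and the rendered message; a minimal sketch with placeholder names:

    bool canAcceptWrites = false;  // placeholder condition
    StringData opName = "insert";  // placeholder operation name
    if (!canAcceptWrites) {
        uasserted(ErrorCodes::NotMaster,
                  str::stream() << "Not-master error while processing '" << opName
                                << "' operation");
    }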
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index e52a99383f1..2f6145f0287 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/ops/write_ops.h"
@@ -92,8 +91,8 @@ void killSessionTokens(OperationContext* opCtx,
return;
getThreadPool(opCtx)->schedule(
- [ service = opCtx->getServiceContext(),
- sessionKillTokens = std::move(sessionKillTokens) ](auto status) mutable {
+ [service = opCtx->getServiceContext(),
+ sessionKillTokens = std::move(sessionKillTokens)](auto status) mutable {
invariant(status);
ThreadClient tc("Kill-Sessions", service);
@@ -185,11 +184,10 @@ void createTransactionTable(OperationContext* opCtx) {
return;
}
- uassertStatusOKWithContext(status,
- str::stream()
- << "Failed to create the "
- << NamespaceString::kSessionTransactionsTableNamespace.ns()
- << " collection");
+ uassertStatusOKWithContext(
+ status,
+ str::stream() << "Failed to create the "
+ << NamespaceString::kSessionTransactionsTableNamespace.ns() << " collection");
}
void abortInProgressTransactions(OperationContext* opCtx) {
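The schedule() hunk above is another capture-spacing fix. The idiom itself moves ownership of the kill tokens into the closure; a sketch with a simplified payload (assumes 'pool' is a ThreadPool whose callback receives a Status):

    auto payload = std::make_unique<std::vector<std::string>>();
    pool->schedule([payload = std::move(payload)](auto status) mutable {
        if (!status.isOK())
            return;  // the pool is shutting down; drop the work
        payload->push_back("done");  // the closure owns 'payload' now
    });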
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index da7712f89d4..a5512625dbf 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -123,12 +123,14 @@ TEST_F(SessionCatalogTest, ScanSession) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0], [&lsids](const ObservableSession& session) {
@@ -154,12 +156,14 @@ TEST_F(SessionCatalogTest, ScanSessionMarkForReapWhenSessionIsIdle) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0],
@@ -196,12 +200,14 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ScanSessions) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
// Scan over all Sessions.
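These three test hunks are the same mechanical change: the trailing .get() now sits on its own line under the stdx::async() call. The pattern runs a body on a fresh thread and blocks until it finishes; .get() also rethrows anything the body threw:

    stdx::async(stdx::launch::async,
                [] {
                    // per-thread setup (e.g. a ThreadClient) goes here
                })
        .get();  // wait, and propagate any exception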
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index 3338979d8e2..bdfac76abff 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -46,18 +46,18 @@ class OperationContext;
class SessionsCollectionConfigServer : public SessionsCollectionSharded {
public:
/**
- * Ensures that the sessions collection has been set up for this cluster,
- * sharded, and with the proper indexes.
- *
- * This method may safely be called multiple times.
- *
- * If there are no shards in this cluster, this method will do nothing.
- */
+ * Ensures that the sessions collection has been set up for this cluster,
+ * sharded, and with the proper indexes.
+ *
+ * This method may safely be called multiple times.
+ *
+ * If there are no shards in this cluster, this method will do nothing.
+ */
Status setupSessionsCollection(OperationContext* opCtx) override;
/**
- * Checks if the sessions collection exists.
- */
+ * Checks if the sessions collection exists.
+ */
Status checkSessionsCollectionExists(OperationContext* opCtx) override;
private:
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 4efa4a79840..37b8d98aa02 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -172,24 +172,21 @@ public:
void openSource() {
_file.open(_fileName.c_str(), std::ios::in | std::ios::binary);
uassert(16814,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
_file.seekg(_fileStartOffset);
uassert(50979,
str::stream() << "error seeking starting offset of '" << _fileStartOffset
- << "' in file \""
- << _fileName
- << "\": "
- << myErrnoWithDescription(),
+ << "' in file \"" << _fileName << "\": " << myErrnoWithDescription(),
_file.good());
}
void closeSource() {
_file.close();
uassert(50969,
- str::stream() << "error closing file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error closing file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
!_file.fail());
}
@@ -290,8 +287,8 @@ private:
const std::streampos offset = _file.tellg();
uassert(51049,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
offset >= 0);
if (offset >= _fileEndOffset) {
@@ -302,8 +299,8 @@ private:
_file.read(reinterpret_cast<char*>(out), size);
uassert(16817,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
verify(_file.gcount() == static_cast<std::streamsize>(size));
}
@@ -556,8 +553,7 @@ private:
// need to be revisited.
uasserted(16819,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -844,8 +840,7 @@ private:
// need to be revisited.
uasserted(16820,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -921,8 +916,8 @@ SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts,
// limits.
_file.open(_fileName.c_str(), std::ios::binary | std::ios::app | std::ios::out);
uassert(16818,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription(),
_file.good());
// The file descriptor is positioned at the end of a file when opened in append mode, but
// _file.tellp() is not initialized on all systems to reflect this. Therefore, we must also pass
@@ -985,8 +980,8 @@ void SortedFileWriter<Key, Value>::spill() {
_file.write(outBuffer, std::abs(size));
} catch (const std::exception&) {
msgasserted(16821,
- str::stream() << "error writing to file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription());
+ str::stream() << "error writing to file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription());
}
_buffer.reset();
@@ -998,8 +993,7 @@ SortIteratorInterface<Key, Value>* SortedFileWriter<Key, Value>::done() {
std::streampos currentFileOffset = _file.tellp();
uassert(50980,
str::stream() << "error fetching current file descriptor offset in file \"" << _fileName
- << "\": "
- << sorter::myErrnoWithDescription(),
+ << "\": " << sorter::myErrnoWithDescription(),
currentFileOffset >= 0);
// In case nothing was written to disk, use _fileStartOffset because tellp() may not be
@@ -1047,4 +1041,4 @@ Sorter<Key, Value>* Sorter<Key, Value>::make(const SortOptions& opts,
return new sorter::TopKSorter<Key, Value, Comparator>(opts, comp, settings);
}
}
-}
+} // namespace mongo
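All of the sorter hunks repack uassert() message streams. uassert(code, msg, expr) evaluates msg only when expr is false, so the reflowed chains are free on the happy path. A sketch with a placeholder code and path (assumes the sorter's myErrnoWithDescription() helper is in scope):

    std::string fileName = "extsort.tmp";  // placeholder path
    std::ifstream file(fileName, std::ios::in | std::ios::binary);
    uassert(99999,  // placeholder; real call sites use unique codes
            str::stream() << "error opening file \"" << fileName
                          << "\": " << myErrnoWithDescription(),
            file.good());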
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index f504d466ac3..fccb3eef115 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -266,7 +266,7 @@ private:
std::streampos _fileStartOffset;
std::streampos _fileEndOffset;
};
-}
+} // namespace mongo
/**
* #include "mongo/db/sorter/sorter.cpp" and call this in a single translation
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 31a8b6c04b8..099df94ceac 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -100,9 +100,9 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
#endif
/*
- * We did not add the message to startupWarningsLog as the user can not
- * specify a sslCAFile parameter from the shell
- */
+ * We did not add the message to startupWarningsLog as the user can not
+ * specify a sslCAFile parameter from the shell
+ */
if (sslGlobalParams.sslMode.load() != SSLParams::SSLMode_disabled &&
#ifdef MONGO_CONFIG_SSL_CERTIFICATE_SELECTORS
sslGlobalParams.sslCertificateSelector.empty() &&
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index 470fc90388e..8cffdb2088a 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -111,9 +111,9 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
if (opMode.empty()) {
- return StatusWith<std::string>(
- ErrorCodes::BadValue,
- str::stream() << "invalid mode in " << filename << ": '" << line << "'");
+ return StatusWith<std::string>(ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '"
+ << line << "'");
}
// Check against acceptable values of opMode.
@@ -122,16 +122,12 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
ErrorCodes::BadValue,
str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
- << filename
- << ": '"
- << opMode
- << "''");
+ << filename << ": '" << opMode << "'");
}
} catch (const boost::filesystem::filesystem_error& err) {
return StatusWith<std::string>(ErrorCodes::UnknownError,
str::stream() << "Failed to probe \"" << err.path1().string()
- << "\": "
- << err.code().message());
+ << "\": " << err.code().message());
}
return StatusWith<std::string>(opMode);
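These hunks reflow StatusWith<std::string> error returns. StatusWith carries either a value or an error Status; a condensed sketch of the same shape (parsing elided):

    StatusWith<std::string> readOpMode(const std::string& filename) {
        std::string opMode;  // would be parsed from the file
        if (opMode.empty()) {
            return StatusWith<std::string>(
                ErrorCodes::BadValue,
                str::stream() << "invalid mode in " << filename);
        }
        return StatusWith<std::string>(opMode);
    }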
diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp
index 4c5fa73bcd4..5e667340d1a 100644
--- a/src/mongo/db/stats/counters.cpp
+++ b/src/mongo/db/stats/counters.cpp
@@ -159,4 +159,4 @@ void NetworkCounter::append(BSONObjBuilder& b) {
OpCounters globalOpCounters;
OpCounters replOpCounters;
NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index d74402c8571..d7b8a0b88ec 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -139,4 +139,4 @@ private:
};
extern NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/fine_clock.h b/src/mongo/db/stats/fine_clock.h
index d01c2e74d4a..fe793ef16bc 100644
--- a/src/mongo/db/stats/fine_clock.h
+++ b/src/mongo/db/stats/fine_clock.h
@@ -69,6 +69,6 @@ public:
return diff;
}
};
-}
+} // namespace mongo
#endif // DB_STATS_FINE_CLOCK_HEADER
diff --git a/src/mongo/db/stats/timer_stats.cpp b/src/mongo/db/stats/timer_stats.cpp
index bb52e0226d7..35b1027fff1 100644
--- a/src/mongo/db/stats/timer_stats.cpp
+++ b/src/mongo/db/stats/timer_stats.cpp
@@ -69,4 +69,4 @@ BSONObj TimerStats::getReport() const {
b.appendNumber("totalMillis", t);
return b.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/timer_stats.h b/src/mongo/db/stats/timer_stats.h
index d09533bd537..029a238577c 100644
--- a/src/mongo/db/stats/timer_stats.h
+++ b/src/mongo/db/stats/timer_stats.h
@@ -88,4 +88,4 @@ private:
bool _recorded;
Timer _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index 8f69ee8d617..8cd4ae9d893 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -55,8 +55,7 @@ Ordering allAscending = Ordering::make(BSONObj());
auto const version = KeyString::Version::V1;
BSONObj const sample = BSON(""
<< "s"
- << ""
- << (int64_t)0);
+ << "" << (int64_t)0);
std::string createKey(StringData ident, int64_t recordId) {
KeyString ks(version, BSON("" << ident << "" << recordId), allAscending);
@@ -608,7 +607,7 @@ RecordStore::SizeAdjuster::~SizeAdjuster() {
int64_t deltaDataSize = _workingCopy->dataSize() - _origDataSize;
_rs->_numRecords.fetchAndAdd(deltaNumRecords);
_rs->_dataSize.fetchAndAdd(deltaDataSize);
- RecoveryUnit::get(_opCtx)->onRollback([ rs = _rs, deltaNumRecords, deltaDataSize ]() {
+ RecoveryUnit::get(_opCtx)->onRollback([rs = _rs, deltaNumRecords, deltaDataSize]() {
invariant(rs->_numRecords.load() >= deltaNumRecords);
rs->_numRecords.fetchAndSubtract(deltaNumRecords);
rs->_dataSize.fetchAndSubtract(deltaDataSize);
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
index 5ff7ca7cb75..95147d485a3 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
@@ -57,12 +57,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
BSON(IndexDescriptor::kPartialFilterExprFieldName.toString() << BSON(""
diff --git a/src/mongo/db/storage/biggie/store.h b/src/mongo/db/storage/biggie/store.h
index 6c0c883f108..a09b5b49b63 100644
--- a/src/mongo/db/storage/biggie/store.h
+++ b/src/mongo/db/storage/biggie/store.h
@@ -153,10 +153,10 @@ public:
: _root(root), _current(current) {}
/**
- * This function traverses the tree to find the next left-most node with data. Modifies
- * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
- * node itself then 'visit' the child subtrees from left to right).
- */
+ * This function traverses the tree to find the next left-most node with data. Modifies
+ * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
+ * node itself then 'visit' the child subtrees from left to right).
+ */
void _findNext() {
// If 'current' is a nullptr there is no next node to go to.
if (_current == nullptr)
diff --git a/src/mongo/db/storage/biggie/store_test.cpp b/src/mongo/db/storage/biggie/store_test.cpp
index cc4c8a5d7ca..e75a81bc7c7 100644
--- a/src/mongo/db/storage/biggie/store_test.cpp
+++ b/src/mongo/db/storage/biggie/store_test.cpp
@@ -2492,5 +2492,5 @@ TEST_F(RadixStoreTest, LowerBoundEndpoint) {
ASSERT_TRUE(it == thisStore.end());
}
-} // biggie namespace
-} // mongo namespace
+} // namespace biggie
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 62ff6b13d93..2d7bc0b42c6 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -254,4 +254,4 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
prefix = KVPrefix::fromBSONElement(obj["prefix"]);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 15405b1942d..4f71435937c 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -110,4 +110,4 @@ public:
KVPrefix prefix = KVPrefix::kNotPrefixed;
};
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 44b11310544..cced6f61f5d 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -63,4 +63,4 @@ public:
*/
virtual void notifyCappedWaitersIfNeeded() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index cbf4373476f..ddf0406bf10 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -157,4 +157,4 @@ private:
int _cachePressureForTest;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 499a4215fb0..69e263b7e9d 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -804,7 +804,7 @@ StatusWith<std::unique_ptr<RecordStore>> DurableCatalogImpl::createCollection(
}
CollectionUUID uuid = options.uuid.get();
- opCtx->recoveryUnit()->onRollback([ opCtx, catalog = this, nss, ident, uuid ]() {
+ opCtx->recoveryUnit()->onRollback([opCtx, catalog = this, nss, ident, uuid]() {
// Intentionally ignoring failure
catalog->_engine->getEngine()->dropIdent(opCtx, ident).ignore();
});
@@ -871,7 +871,7 @@ Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, const Namespa
// This will notify the storageEngine to drop the collection only on WUOW::commit().
opCtx->recoveryUnit()->onCommit(
- [ opCtx, catalog = this, nss, uuid, ident ](boost::optional<Timestamp> commitTimestamp) {
+ [opCtx, catalog = this, nss, uuid, ident](boost::optional<Timestamp> commitTimestamp) {
StorageEngineInterface* engine = catalog->_engine;
auto storageEngine = engine->getStorageEngine();
if (storageEngine->supportsPendingDrops() && commitTimestamp) {
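The two catalog hunks tighten capture lists on recovery-unit hooks. The hooks pair an undo action with a commit observer; a minimal sketch (assumes 'opCtx' and 'ident' are in scope):

    // Runs only if the WriteUnitOfWork rolls back.
    opCtx->recoveryUnit()->onRollback([ident]() {
        // undo side effects tied to 'ident'
    });
    // Runs on commit; the timestamp is engaged only for timestamped writes.
    opCtx->recoveryUnit()->onCommit([](boost::optional<Timestamp> commitTime) {
        if (commitTime) {
            // e.g. defer a drop until the commit timestamp is durable
        }
    });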
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 54ee0141b7f..6e921284506 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -126,4 +126,4 @@ std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext*
}
return all;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index 9a6e30d9e5b..04480585d29 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -134,4 +134,4 @@ private:
// Notified when we write as everything is considered "journalled" since repl depends on it.
JournalListener* _journalListener = &NoOpJournalListener::instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 3ce3af87357..cea71436ecd 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -387,10 +387,8 @@ StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(con
return StatusWith<RecordId>(ErrorCodes::BadValue,
str::stream() << "attempted out-of-order oplog insert of "
- << status.getValue()
- << " (oplog last insert was "
- << _data->records.rbegin()->first
- << " )");
+ << status.getValue() << " (oplog last insert was "
+ << _data->records.rbegin()->first << " )");
}
return status;
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
index 2305f72b52d..fbddf0ebd94 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
@@ -71,4 +71,4 @@ void EphemeralForTestRecoveryUnit::abortUnitOfWork() {
Status EphemeralForTestRecoveryUnit::obtainMajorityCommittedSnapshot() {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/journal_listener.h b/src/mongo/db/storage/journal_listener.h
index 275b8ad05d7..88597adb2bc 100644
--- a/src/mongo/db/storage/journal_listener.h
+++ b/src/mongo/db/storage/journal_listener.h
@@ -70,4 +70,4 @@ public:
// As this has no state, it is de facto const and can be safely shared freely.
static NoOpJournalListener instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 6a279990fb6..8c37cc2ef52 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -1302,9 +1302,9 @@ void toBsonValue(uint8_t ctype,
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
auto type = typeBits->readNumeric();
@@ -1417,7 +1417,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
const uint8_t originalType = typeBits->readNumeric();
@@ -1548,7 +1548,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
@@ -1788,9 +1788,9 @@ void filterKeyFromKeyString(uint8_t ctype,
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
break;
@@ -1829,7 +1829,7 @@ void filterKeyFromKeyString(uint8_t ctype,
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
uint64_t encoded = readType<uint64_t>(reader, inverted);
@@ -1891,7 +1891,7 @@ void filterKeyFromKeyString(uint8_t ctype,
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
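Here clang-format re-indents the `// fallthrough` comments to match their case labels. The intent, per the comments, is that negative encodings share the positive decode path with the byte stream inverted; a runnable miniature of the pattern (hypothetical names):

    enum SmallMagnitudeType { kNegativeSmall, kPositiveSmall };
    int decode(SmallMagnitudeType ctype) {
        bool inverted = false;
        switch (ctype) {
            case kNegativeSmall:
                inverted = !inverted;
                // fallthrough (format is the same as positive, but inverted)
                [[fallthrough]];
            case kPositiveSmall:
                return inverted ? -42 : 42;  // stand-in for the real decoder
        }
        return 0;
    }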
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index f5f8b50c47c..86055a38fd1 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -524,7 +524,6 @@ TEST_F(KeyStringTest, LotsOfNumbers3) {
for (double k = 0; k < 8; k++) {
futures.push_back(stdx::async(stdx::launch::async, [k, this] {
-
for (double i = -1100; i < 1100; i++) {
for (double j = 0; j < 52; j++) {
const auto V1 = KeyString::Version::V1;
@@ -746,10 +745,8 @@ const std::vector<BSONObj>& getInterestingElements(KeyString::Version version) {
// Something with exceptional typeBits for Decimal
elements.push_back(
BSON("" << BSON_ARRAY("" << BSONSymbol("") << Decimal128::kNegativeInfinity
- << Decimal128::kPositiveInfinity
- << Decimal128::kPositiveNaN
- << Decimal128("0.0000000")
- << Decimal128("-0E1000"))));
+ << Decimal128::kPositiveInfinity << Decimal128::kPositiveNaN
+ << Decimal128("0.0000000") << Decimal128("-0E1000"))));
}
//
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index 6daff729d9d..2d1706237fc 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -129,8 +129,7 @@ public:
bool match = (expected == actual);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expected) << ", "
- << "Actual: "
- << dumpMultikeyPaths(actual));
+ << "Actual: " << dumpMultikeyPaths(actual));
}
ASSERT(match);
}
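The durable-catalog test hunk repacks a FAIL() message. FAIL() accepts anything convertible to a string, so a str::stream chain works directly; a sketch with plain strings standing in for dumpMultikeyPaths() output:

    std::string expected = "{a: [0]}";
    std::string actual = "{a: []}";
    if (expected != actual) {
        FAIL(str::stream() << "Expected: " << expected << ", "
                           << "Actual: " << actual);
    }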
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 75a065fee40..12054cb6ec0 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -439,4 +439,4 @@ protected:
*/
const int64_t kDefaultCappedSizeBytes = 4096;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index fc53d677e1d..48310040e62 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -168,9 +168,7 @@ TEST(KVEngineTestHarness, SimpleSorted1) {
IndexDescriptor desc(collection.get(),
"",
BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
@@ -706,10 +704,7 @@ DEATH_TEST_F(DurableCatalogImplTest, TerminateOnNonNumericIndexVersion, "Fatal A
"",
BSON("v"
<< "1"
- << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << "ns" << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 078446493bc..6b88dc22c3b 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -70,4 +70,4 @@ std::string KVPrefix::toString() const {
stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index ee35720cbe5..6a785dc19db 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -100,4 +100,4 @@ private:
inline std::ostream& operator<<(std::ostream& s, const KVPrefix& prefix) {
return (s << prefix.toString());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/temporary_kv_record_store.h b/src/mongo/db/storage/kv/temporary_kv_record_store.h
index a992ca69cd4..f4b7c6033bd 100644
--- a/src/mongo/db/storage/kv/temporary_kv_record_store.h
+++ b/src/mongo/db/storage/kv/temporary_kv_record_store.h
@@ -53,8 +53,7 @@ public:
// Move constructor.
TemporaryKVRecordStore(TemporaryKVRecordStore&& other) noexcept
- : TemporaryRecordStore(std::move(other._rs)),
- _kvEngine(other._kvEngine) {}
+ : TemporaryRecordStore(std::move(other._rs)), _kvEngine(other._kvEngine) {}
~TemporaryKVRecordStore();
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 605117e6983..08586e0ece8 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -102,8 +102,8 @@ public:
private:
/**
- * Gets the front element from _sessions and then pops it off the queue.
- */
+ * Gets the front element from _sessions and then pops it off the queue.
+ */
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 1ba6dfda971..ab8752a2d93 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -623,4 +623,4 @@ public:
const RecordData& recordData,
size_t* dataSize) = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 6a72d25e954..9c1578c260b 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
TEST(RecordStoreTestHarness, Simple1) {
const auto harnessHelper(newRecordStoreHarnessHelper());
@@ -115,7 +115,7 @@ public:
return false;
}
};
-}
+} // namespace
TEST(RecordStoreTestHarness, Simple1InsertDocWroter) {
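The next several record-store test diffs are one mechanical change: clang-format's using-declaration sorting (SortUsingDeclarations) orders the list lexicographically, so `unique_ptr` now follows `string` and `stringstream`:

    using std::set;
    using std::string;
    using std::stringstream;
    using std::unique_ptr;  // sorted below the s-names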
diff --git a/src/mongo/db/storage/record_store_test_randomiter.cpp b/src/mongo/db/storage/record_store_test_randomiter.cpp
index c9c9757d827..dda51057e6d 100644
--- a/src/mongo/db/storage/record_store_test_randomiter.cpp
+++ b/src/mongo/db/storage/record_store_test_randomiter.cpp
@@ -38,10 +38,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create a random iterator for empty record store.
TEST(RecordStoreTestHarness, GetRandomIteratorEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 38a5f356aad..c50ebba023c 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -42,9 +42,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert multiple records and iterate through them in the forward direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp
index c5a95f250c2..00ed5598017 100644
--- a/src/mongo/db/storage/record_store_test_recordstore.cpp
+++ b/src/mongo/db/storage/record_store_test_recordstore.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Verify that the name of the record store is not NULL and nonempty.
TEST(RecordStoreTestHarness, RecordStoreName) {
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index 74aa0237cbc..cad095d0286 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -40,10 +40,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create an iterator for repairing an empty record store.
TEST(RecordStoreTestHarness, GetIteratorForRepairEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index 743559a1079..5b54853cab8 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that a nonempty collection maybe takes up some space on disk.
TEST(RecordStoreTestHarness, StorageSizeNonEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index 43b52b39ba2..3f3ccc34ee1 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling touch() on an empty collection returns an OK status.
TEST(RecordStoreTestHarness, TouchEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index d05e3e9a117..a37c9a6681c 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling truncate() on an already empty collection returns an OK status.
TEST(RecordStoreTestHarness, TruncateEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index d6f16586cde..b07d215cfa0 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert a record and try to update it.
TEST(RecordStoreTestHarness, UpdateRecord) {
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index 298685c7285..9753e7d76b6 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -40,8 +40,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Insert a record and try to perform an in-place update on it.
TEST(RecordStoreTestHarness, UpdateWithDamages) {
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index d49e1cc5dd5..e24f33ecadd 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -45,8 +45,8 @@
using std::ios_base;
using std::ofstream;
-using std::stringstream;
using std::string;
+using std::stringstream;
namespace mongo {
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
index 57045aae502..d169e4dada0 100644
--- a/src/mongo/db/storage/snapshot.h
+++ b/src/mongo/db/storage/snapshot.h
@@ -93,4 +93,4 @@ private:
SnapshotId _id;
T _value;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index 895fc5560d9..20aba3337b3 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -40,12 +40,15 @@ namespace {
void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -80,12 +83,15 @@ TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) {
void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -220,12 +226,14 @@ TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) {
void testSetEndPosition_Restore_Forward(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(key3, false); // Should never see key3 or key4.
@@ -241,7 +249,8 @@ void testSetEndPosition_Restore_Forward(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -257,12 +266,14 @@ TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) {
void testSetEndPosition_Restore_Reverse(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(key2, false); // Should never see key1 or key2.
@@ -278,7 +289,8 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -302,7 +314,8 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -337,7 +350,8 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -370,12 +384,13 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(BSONObj(), inclusive);
@@ -401,12 +416,13 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(BSONObj(), inclusive);
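The harness hunks in this file show the other big visual change: a braced initializer list that exceeds the column limit now gets one element per line instead of being bin-packed. The same rule on a self-contained example:

    #include <utility>
    #include <vector>

    std::vector<std::pair<int, int>> pairs = {
        {1, 1},
        {2, 1},
        {3, 1},
        {4, 1},
        {5, 1},
    };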
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index 88a43ed0005..30d207d5031 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -300,7 +300,8 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -335,7 +336,8 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -414,12 +416,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
@@ -497,12 +500,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key0, loc1}, {key1, loc1}, {key2, loc2},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key0, loc1},
+ {key1, loc1},
+ {key2, loc2},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -580,12 +584,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev
TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique=*/false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique=*/false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index fa608652d8f..4a0584e0559 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -40,12 +40,13 @@ namespace {
void testSeekExact_Hit(bool unique, bool forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -111,7 +112,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -131,7 +135,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
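
The seek_exact hunks above reseed the test index with one {key, RecordId} pair per line. As a minimal standalone sketch of what SeekExact_HitWithDups exercises, with std::multimap standing in for the real SortedDataInterface (keys and record ids here are illustrative, not the test's key1/loc1 constants):

#include <cassert>
#include <map>
#include <string>

int main() {
    // Seeded the same way as the tests above: one entry per line,
    // including a duplicate key with a distinct record id.
    std::multimap<std::string, int> sorted{
        {"key1", 1},
        {"key2", 1},
        {"key2", 2},  // duplicate key, distinct record id
        {"key3", 1},
    };
    // seekExact("key2") on a forward cursor lands on the first duplicate;
    // lower_bound() gives the same guarantee here.
    auto it = sorted.lower_bound("key2");
    assert(it != sorted.end() && it->second == 1);
    assert((++it)->second == 2);  // advancing visits the second duplicate
    return 0;
}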
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index d8f6f764e27..a1a68e421dc 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -151,8 +151,8 @@ public:
};
/**
- * The destructor should only be called if we are tearing down but not exiting the process.
- */
+ * The destructor should only be called if we are tearing down but not exiting the process.
+ */
virtual ~StorageEngine() {}
/**
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index f485102aa23..72b53bd7fb9 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -92,8 +92,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (status.code() == ErrorCodes::DataModifiedByRepair) {
warning() << "Catalog data modified by repair: " << status.reason();
- repairObserver->onModification(str::stream() << "DurableCatalog repaired: "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "DurableCatalog repaired: " << status.reason());
} else {
fassertNoTrace(50926, status);
}
@@ -209,8 +209,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (_options.forRepair) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << nss << " dropped: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << nss
+ << " dropped: " << status.reason());
}
wuow.commit();
continue;
@@ -298,8 +298,8 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx,
}
if (dataModified) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << collectionName << " recovered: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << collectionName
+ << " recovered: " << status.reason());
}
wuow.commit();
return Status::OK();
@@ -397,8 +397,7 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
if (engineIdents.find(identForColl) == engineIdents.end()) {
return {ErrorCodes::UnrecoverableRollbackError,
str::stream() << "Expected collection does not exist. Collection: " << coll
- << " Ident: "
- << identForColl};
+ << " Ident: " << identForColl};
}
}
}
@@ -494,8 +493,8 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
for (auto&& indexName : indexesToDrop) {
invariant(metaData.eraseIndex(indexName),
- str::stream() << "Index is missing. Collection: " << coll << " Index: "
- << indexName);
+ str::stream()
+ << "Index is missing. Collection: " << coll << " Index: " << indexName);
}
if (indexesToDrop.size() > 0) {
WriteUnitOfWork wuow(opCtx);
@@ -683,8 +682,8 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx, const Names
}
if (dataModified) {
- repairObserver->onModification(str::stream() << "Collection " << nss << ": "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "Collection " << nss << ": " << status.reason());
}
// After repairing, re-initialize the collection with a valid RecordStore.
@@ -819,8 +818,8 @@ void StorageEngineImpl::_dumpCatalog(OperationContext* opCtx) {
while (rec) {
// This should only be called by a parent that's done an appropriate `shouldLog` check. Do
// not duplicate the log level policy.
- LOG_FOR_RECOVERY(kCatalogLogLevel) << "\tId: " << rec->id
- << " Value: " << rec->data.toBson();
+ LOG_FOR_RECOVERY(kCatalogLogLevel)
+ << "\tId: " << rec->id << " Value: " << rec->data.toBson();
rec = cursor->next();
}
opCtx->recoveryUnit()->abandonSnapshot();
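
The storage_engine_impl.cpp hunks above only reflow mongo::str::stream() chains; the underlying pattern is an ostream-style builder that converts implicitly to std::string. A minimal standalone analogue of that pattern (not the real mongo::str::stream, which lives in the server's util headers):

#include <sstream>
#include <string>

// Accumulates via operator<< and converts implicitly to std::string,
// mirroring how the repair-observer messages above are assembled.
class StreamToString {
public:
    template <typename T>
    StreamToString& operator<<(const T& v) {
        _ss << v;
        return *this;
    }
    operator std::string() const {
        return _ss.str();
    }

private:
    std::ostringstream _ss;
};

int main() {
    std::string msg = StreamToString() << "Collection " << "test.coll"
                                       << " dropped: " << "invalid metadata";
    return msg.empty() ? 1 : 0;
}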
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index fd7701b8c8d..7418219ec8c 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -106,14 +106,12 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
getFactoryForStorageEngine(service, storageGlobalParams.engine);
if (factory) {
uassert(28662,
- str::stream() << "Cannot start server. Detected data files in " << dbpath
- << " created by"
- << " the '"
- << *existingStorageEngine
- << "' storage engine, but the"
- << " specified storage engine was '"
- << factory->getCanonicalName()
- << "'.",
+ str::stream()
+ << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '" << *existingStorageEngine << "' storage engine, but the"
+ << " specified storage engine was '" << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
}
} else {
@@ -156,8 +154,7 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
uassert(34368,
str::stream()
<< "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine
- << ", does not support read-only operation",
+ << storageGlobalParams.engine << ", does not support read-only operation",
factory->supportsReadOnly());
}
@@ -223,9 +220,7 @@ void createLockFile(ServiceContext* service) {
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath
- << ": "
- << ex.what(),
+ << storageGlobalParams.dbpath << ": " << ex.what(),
false);
}
const bool wasUnclean = lockFile->createdByUncleanShutdown();
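
The uassert(28662, ...) and uassert(28596, ...) hunks above build the failure message with a stream and pair it with a predicate. A minimal standalone analogue of that assert-with-streamed-message pattern, with hypothetical engine names and a plain exception standing in for the server's AssertionException:

#include <sstream>
#include <stdexcept>
#include <string>

struct AssertionSketch : std::runtime_error {
    int code;
    AssertionSketch(int c, const std::string& m) : std::runtime_error(m), code(c) {}
};

// Throws with the prebuilt message when the predicate fails.
inline void uassertSketch(int code, const std::string& msg, bool expr) {
    if (!expr)
        throw AssertionSketch(code, msg);
}

int main() {
    const std::string existing = "wiredTiger";  // engine detected in dbpath (illustrative)
    const std::string requested = "inMemory";   // engine from the command line (illustrative)
    std::ostringstream ss;
    ss << "Cannot start server. Detected data files created by the '" << existing
       << "' storage engine, but the specified storage engine was '" << requested << "'.";
    try {
        uassertSketch(28662, ss.str(), requested == existing);
    } catch (const AssertionSketch&) {
        return 1;  // startup refused, mirroring the real uassert path
    }
    return 0;
}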
diff --git a/src/mongo/db/storage/storage_engine_interface.h b/src/mongo/db/storage/storage_engine_interface.h
index db7201c1492..77703c8aa6f 100644
--- a/src/mongo/db/storage/storage_engine_interface.h
+++ b/src/mongo/db/storage/storage_engine_interface.h
@@ -46,4 +46,4 @@ public:
StringData ident) = 0;
virtual DurableCatalog* getCatalog() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index b39b0503547..c0398eddec3 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -67,8 +67,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(40387,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -85,8 +85,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(40388,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -136,8 +136,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
// Use file permissions 644
@@ -153,13 +152,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
int ret = ::flock(lockFile, LOCK_EX | LOCK_NB);
if (ret != 0) {
@@ -167,11 +164,9 @@ Status StorageEngineLockFile::open() {
::close(lockFile);
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to lock the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Another mongod instance is already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_fd = lockFile;
return Status::OK();
@@ -197,9 +192,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string to file (ftruncate failed): "
- << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << _filespec << ' ' << errnoWithDescription(errorcode));
}
int bytesWritten = ::write(_lockFileHandle->_fd, str.rawData(), str.size());
@@ -207,8 +200,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
@@ -220,9 +212,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << str
- << " to file (fsync failed): "
- << _filespec
- << ' '
+ << " to file (fsync failed): " << _filespec << ' '
<< errnoWithDescription(errorcode));
}
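
The POSIX lock-file hunks above reflow messages around the open/flock sequence. A minimal sketch of that advisory-locking pattern under a hypothetical path (not the real StorageEngineLockFile): create or open the file with mode 644, then take a non-blocking exclusive flock so a second process fails fast instead of waiting:

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
    const char* filespec = "/tmp/example-mongod.lock";  // hypothetical path
    int fd = ::open(filespec, O_RDWR | O_CREAT, 0644);  // file permissions 644
    if (fd < 0) {
        std::fprintf(stderr, "Unable to create/open the lock file: %s (%s)\n",
                     filespec, std::strerror(errno));
        return 1;
    }
    if (::flock(fd, LOCK_EX | LOCK_NB) != 0) {
        std::fprintf(stderr,
                     "Unable to lock the lock file: %s (%s). "
                     "Another instance may already be running.\n",
                     filespec, std::strerror(errno));
        ::close(fd);
        return 1;
    }
    // ... hold the lock for the lifetime of the process ...
    ::close(fd);  // closing the descriptor releases the flock
    return 0;
}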
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index 153040ef874..df4967e2d41 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -46,8 +46,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index 2be6f11bb03..4055318d1d8 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -108,8 +108,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
HANDLE lockFileHandle = CreateFileW(toNativeString(_filespec.c_str()).c_str(),
@@ -130,13 +129,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_handle = lockFileHandle;
return Status::OK();
@@ -171,8 +168,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = GetLastError();
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 62fecc4c102..ecf401f3ee9 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -142,13 +142,13 @@ Status StorageEngineMetadata::read() {
boost::uintmax_t fileSize = boost::filesystem::file_size(metadataPath);
if (fileSize == 0) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Metadata file " << metadataPath.string()
- << " cannot be empty.");
+ str::stream()
+ << "Metadata file " << metadataPath.string() << " cannot be empty.");
}
if (fileSize == static_cast<boost::uintmax_t>(-1)) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Unable to determine size of metadata file "
- << metadataPath.string());
+ str::stream()
+ << "Unable to determine size of metadata file " << metadataPath.string());
}
std::vector<char> buffer(fileSize);
@@ -156,23 +156,21 @@ Status StorageEngineMetadata::read() {
std::ifstream ifs(metadataPath.c_str(), std::ios_base::in | std::ios_base::binary);
if (!ifs) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed to read metadata from "
- << metadataPath.string());
+ str::stream()
+ << "Failed to read metadata from " << metadataPath.string());
}
// Read BSON from file
ifs.read(&buffer[0], buffer.size());
if (!ifs) {
return Status(ErrorCodes::FileStreamFailed,
- str::stream() << "Unable to read BSON data from "
- << metadataPath.string());
+ str::stream()
+ << "Unable to read BSON data from " << metadataPath.string());
}
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unexpected error reading BSON data from "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataPath.string() << ": " << ex.what());
}
ConstDataRange cdr(&buffer[0], buffer.size());
@@ -232,8 +230,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -250,8 +248,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(13651,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -270,9 +268,9 @@ Status StorageEngineMetadata::write() const {
{
std::ofstream ofs(metadataTempPath.c_str(), std::ios_base::out | std::ios_base::binary);
if (!ofs) {
- return Status(
- ErrorCodes::FileNotOpen,
- str::stream() << "Failed to write metadata to " << metadataTempPath.string() << ": "
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream()
+ << "Failed to write metadata to " << metadataTempPath.string() << ": "
<< errnoWithDescription());
}
@@ -281,10 +279,9 @@ Status StorageEngineMetadata::write() const {
ofs.write(obj.objdata(), obj.objsize());
if (!ofs) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to write BSON data to "
- << metadataTempPath.string()
- << ": "
- << errnoWithDescription());
+ str::stream()
+ << "Failed to write BSON data to " << metadataTempPath.string()
+ << ": " << errnoWithDescription());
}
}
@@ -304,11 +301,8 @@ Status StorageEngineMetadata::write() const {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileRenameFailed,
str::stream() << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string()
- << " to "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataTempPath.string() << " to " << metadataPath.string()
+ << ": " << ex.what());
}
return Status::OK();
@@ -324,21 +318,16 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
ErrorCodes::InvalidOptions,
str::stream()
<< "Requested option conflicts with the current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is implicitly set to "
- << (*defaultValue ? "true" : "false")
- << " and cannot be changed");
+ << (*defaultValue ? "true" : "false") << " and cannot be changed");
}
return Status::OK();
}
if (!element.isBoolean()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type())
- << " instead: "
- << element);
+ << typeName(element.type()) << " instead: " << element);
}
if (element.boolean() == expectedValue) {
return Status::OK();
@@ -346,12 +335,9 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
return Status(
ErrorCodes::InvalidOptions,
str::stream() << "Requested option conflicts with current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
- << (element.boolean() ? "true" : "false")
- << " and cannot be changed");
+ << (element.boolean() ? "true" : "false") << " and cannot be changed");
}
} // namespace mongo
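
StorageEngineMetadata::write(), touched above, writes BSON to a temporary file and renames it over the target, with flushMyDirectory() making the rename durable. A minimal sketch of that write-temp/fsync/rename/fsync-directory idiom, with reduced error handling and hypothetical names:

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <string>

// Durably replace dir/name with `contents`: write a temp file, fsync it,
// rename it into place, then fsync the parent directory so the new
// directory entry itself is persisted.
bool writeFileDurably(const std::string& dir, const std::string& name,
                      const std::string& contents) {
    const std::string tmpPath = dir + "/" + name + ".tmp";
    const std::string finalPath = dir + "/" + name;

    int fd = ::open(tmpPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0)
        return false;
    if (::write(fd, contents.data(), contents.size()) !=
            static_cast<ssize_t>(contents.size()) ||
        ::fsync(fd) != 0) {
        ::close(fd);
        return false;
    }
    ::close(fd);

    if (std::rename(tmpPath.c_str(), finalPath.c_str()) != 0)
        return false;

    int dirFd = ::open(dir.c_str(), O_RDONLY);
    if (dirFd < 0)
        return false;
    bool ok = (::fsync(dirFd) == 0);
    ::close(dirFd);
    return ok;
}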
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 0e1e59dc5c2..57e386644d6 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -44,8 +44,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_file_util.cpp b/src/mongo/db/storage/storage_file_util.cpp
index c267b292ee1..dd47a85642d 100644
--- a/src/mongo/db/storage/storage_file_util.cpp
+++ b/src/mongo/db/storage/storage_file_util.cpp
@@ -72,8 +72,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY);
if (fd < 0) {
return {ErrorCodes::FileOpenFailed,
- str::stream() << "Failed to open directory " << dir.string() << " for flushing: "
- << errnoWithDescription()};
+ str::stream() << "Failed to open directory " << dir.string()
+ << " for flushing: " << errnoWithDescription()};
}
if (fsync(fd) != 0) {
int e = errno;
@@ -82,8 +82,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
return {ErrorCodes::OperationFailed,
- str::stream() << "Failed to fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e)};
+ str::stream() << "Failed to fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e)};
}
}
close(fd);
@@ -102,9 +102,7 @@ Status fsyncRename(const boost::filesystem::path& source, const boost::filesyste
if (ec) {
return {ErrorCodes::FileRenameFailed,
str::stream() << "Error renaming data file from " << source.string() << " to "
- << dest.string()
- << ": "
- << ec.message()};
+ << dest.string() << ": " << ec.message()};
}
auto status = fsyncFile(dest);
if (!status.isOK()) {
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index fb1d025289d..1da860e1e0d 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -63,17 +63,12 @@ public:
<< (oldestRequiredTimestampForCrashRecovery
? *oldestRequiredTimestampForCrashRecovery
: Timestamp())
- << "supportsPendingDrops"
- << engine->supportsPendingDrops()
+ << "supportsPendingDrops" << engine->supportsPendingDrops()
<< "dropPendingIdents"
<< static_cast<long long>(engine->getDropPendingIdents().size())
- << "supportsSnapshotReadConcern"
- << engine->supportsReadConcernSnapshot()
- << "readOnly"
- << storageGlobalParams.readOnly
- << "persistent"
- << !engine->isEphemeral()
- << "backupCursorOpen"
+ << "supportsSnapshotReadConcern" << engine->supportsReadConcernSnapshot()
+ << "readOnly" << storageGlobalParams.readOnly << "persistent"
+ << !engine->isEphemeral() << "backupCursorOpen"
<< backupCursorHooks->isBackupCursorOpen());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
index abadc810e6a..2c321725173 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
@@ -77,4 +77,4 @@ protected:
WT_CURSOR* _cursor = nullptr; // Owned
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 502cc540aab..ac0a40b1958 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -145,8 +145,7 @@ StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& option
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -274,9 +273,7 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
Status indexVersionStatus(
ErrorCodes::UnsupportedFormat,
str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName()
- << ", ns: "
- << desc->parentNS()
- << "} - version too new for this mongod."
+ << ", ns: " << desc->parentNS() << "} - version too new for this mongod."
<< " See http://dochub.mongodb.org/core/4.2-downgrade-index for detailed"
<< " instructions on how to handle this error.");
fassertFailedWithStatusNoTrace(28579, indexVersionStatus);
@@ -346,10 +343,10 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx,
warning() << msg;
fullResults->warnings.push_back(msg);
} else if (err) {
- std::string msg = str::stream() << "verify() returned " << wiredtiger_strerror(err)
- << ". "
- << "This indicates structural damage. "
- << "Not examining individual index entries.";
+ std::string msg = str::stream()
+ << "verify() returned " << wiredtiger_strerror(err) << ". "
+ << "This indicates structural damage. "
+ << "Not examining individual index entries.";
error() << msg;
fullResults->errors.push_back(msg);
fullResults->valid = false;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index e4a6d84c447..854c8799b67 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -291,4 +291,4 @@ public:
bool dupsAllowed) override;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index b84d3e812b1..f53623761d1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -85,15 +85,9 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode)
- << " but got "
- << status.toString()
- << " instead. metadataOptions: "
- << metadataOptions
- << "; directoryPerDB: "
- << directoryPerDB
- << "; directoryForIndexes: "
- << directoryForIndexes);
+ << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
+ << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
+ << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 2b463a302d9..47d2e14e9dd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1756,8 +1756,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationCont
str::stream()
<< "No stable timestamp available to recover to. Initial data timestamp: "
<< initialDataTS.toString()
- << ", Stable timestamp: "
- << stableTS.toString());
+ << ", Stable timestamp: " << stableTS.toString());
}
LOG_FOR_ROLLBACK(2) << "WiredTiger::RecoverToStableTimestamp syncing size storer to disk.";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index a3fee8cde25..b80f0698a59 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -473,4 +473,4 @@ private:
// timestamp. Provided by replication layer because WT does not persist timestamps.
AtomicWord<std::uint64_t> _initialDataTimestamp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
index 6e4cbf157ab..90292778505 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
@@ -50,4 +50,4 @@ MONGO_STARTUP_OPTIONS_STORE(WiredTigerOptions)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 5cda75b3c2f..dcbbbf34a65 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -56,9 +56,8 @@ Status applyMaxCacheOverflowSizeGBParameter(WiredTigerMaxCacheOverflowSizeGBPara
int ret = param._data.second->reconfigure(
fmt::format("cache_overflow=(file_max={}M)", valueMB).c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
@@ -91,9 +90,8 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
invariant(_data.second);
int ret = _data.second->reconfigure(str.c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
index 98d91659b4e..e5c44e02365 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
@@ -77,12 +77,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
index 76c8e5121a2..ac09e8d7574 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
@@ -65,9 +65,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class PrefixedWiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 627a21dec18..f05ce0457f8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -71,8 +71,8 @@
namespace mongo {
using namespace fmt::literals;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
@@ -480,8 +480,7 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -644,10 +643,11 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
_engineName(params.engineName),
_isCapped(params.isCapped),
_isEphemeral(params.isEphemeral),
- _isLogged(!isTemp() && WiredTigerUtil::useTableLogging(
- NamespaceString(ns()),
- getGlobalReplSettings().usingReplSets() ||
- repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
+ _isLogged(!isTemp() &&
+ WiredTigerUtil::useTableLogging(
+ NamespaceString(ns()),
+ getGlobalReplSettings().usingReplSets() ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
_isOplog(NamespaceString::oplog(params.ns)),
_cappedMaxSize(params.cappedMaxSize),
_cappedMaxSizeSlack(std::min(params.cappedMaxSize / 10, int64_t(16 * 1024 * 1024))),
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 93e0f221432..b9c323678b3 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -529,4 +529,4 @@ MONGO_FAIL_POINT_DECLARE(WTWriteConflictExceptionForReads);
// will not be considered durable until deactivated. It is unspecified whether writes that commit
// before activation will become visible while active.
MONGO_FAIL_POINT_DECLARE(WTPausePrimaryOplogDurabilityLoop);
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 4c7dcf641a6..ebc51a57ef0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -57,9 +57,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
BSONObj spec = fromjson("{}");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 79126850f5c..f049f5f57a0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -420,8 +420,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) {
str::stream() << "Cannot have both a _lastTimestampSet and a "
"_commitTimestamp. _lastTimestampSet: "
<< _lastTimestampSet->toString()
- << ". _commitTimestamp: "
- << _commitTimestamp.toString());
+ << ". _commitTimestamp: " << _commitTimestamp.toString());
// We reset the _lastTimestampSet between transactions. Since it is legal for one
// transaction on a RecoveryUnit to call setTimestamp() and another to call
@@ -658,8 +657,7 @@ Status WiredTigerRecoveryUnit::setTimestamp(Timestamp timestamp) {
invariant(_prepareTimestamp.isNull());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set WUOW timestamp to "
- << timestamp.toString());
+ << " and trying to set WUOW timestamp to " << timestamp.toString());
invariant(_readAtTimestamp.isNull() || timestamp >= _readAtTimestamp,
str::stream() << "future commit timestamp " << timestamp.toString()
<< " cannot be older than read timestamp "
@@ -686,12 +684,10 @@ void WiredTigerRecoveryUnit::setCommitTimestamp(Timestamp timestamp) {
invariant(!_inUnitOfWork() || !_prepareTimestamp.isNull(), toString(_state));
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set it to "
- << timestamp.toString());
+ << " and trying to set it to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set commit timestamp to "
- << timestamp.toString());
+ << " and trying to set commit timestamp to " << timestamp.toString());
invariant(!_isTimestamped);
_commitTimestamp = timestamp;
@@ -705,9 +701,7 @@ void WiredTigerRecoveryUnit::setDurableTimestamp(Timestamp timestamp) {
invariant(
_durableTimestamp.isNull(),
str::stream() << "Trying to reset durable timestamp when it was already set. wasSetTo: "
- << _durableTimestamp.toString()
- << " setTo: "
- << timestamp.toString());
+ << _durableTimestamp.toString() << " setTo: " << timestamp.toString());
_durableTimestamp = timestamp;
}
@@ -731,16 +725,13 @@ void WiredTigerRecoveryUnit::setPrepareTimestamp(Timestamp timestamp) {
invariant(_inUnitOfWork(), toString(_state));
invariant(_prepareTimestamp.isNull(),
str::stream() << "Trying to set prepare timestamp to " << timestamp.toString()
- << ". It's already set to "
- << _prepareTimestamp.toString());
+ << ". It's already set to " << _prepareTimestamp.toString());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp is " << _commitTimestamp.toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
_prepareTimestamp = timestamp;
}
@@ -780,8 +771,7 @@ void WiredTigerRecoveryUnit::setRoundUpPreparedTimestamps(bool value) {
// This cannot be called after WiredTigerRecoveryUnit::_txnOpen.
invariant(!_isActive(),
str::stream() << "Can't change round up prepared timestamps flag "
- << "when current state is "
- << toString(_state));
+ << "when current state is " << toString(_state));
_roundUpPreparedTimestamps =
(value) ? RoundUpPreparedTimestamps::kRound : RoundUpPreparedTimestamps::kNoRound;
}
@@ -794,8 +784,7 @@ void WiredTigerRecoveryUnit::setTimestampReadSource(ReadSource readSource,
invariant(!_isActive() || _timestampReadSource == readSource,
str::stream() << "Current state: " << toString(_state)
<< ". Invalid internal state while setting timestamp read source: "
- << static_cast<int>(readSource)
- << ", provided timestamp: "
+ << static_cast<int>(readSource) << ", provided timestamp: "
<< (provided ? provided->toString() : "none"));
invariant(!provided == (readSource != ReadSource::kProvided));
invariant(!(provided && provided->isNull()));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index df5e5935b8f..db32a031cb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -60,7 +60,7 @@ public:
false, // .ephemeral
false, // .repair
false // .readOnly
- ) {
+ ) {
repl::ReplicationCoordinator::set(
getGlobalServiceContext(),
std::unique_ptr<repl::ReplicationCoordinator>(new repl::ReplicationCoordinatorMock(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index ace8580f465..afb2da1fbed 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -359,4 +359,4 @@ typedef std::unique_ptr<WiredTigerSession,
UniqueWiredTigerSession;
extern const std::string kWTRepairMsg;
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index b9096b29279..5db2a4e72bc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -103,4 +103,4 @@ private:
mutable stdx::mutex _bufferMutex; // Guards _buffer
Buffer _buffer;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index c5f2fc17651..75c9777a502 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -98,4 +98,4 @@ private:
mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
boost::optional<Timestamp> _localSnapshot;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
index 77ce7b9f222..7349d5786f1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
@@ -77,12 +77,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 4e09d0fdd9b..754171418b6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -64,9 +64,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class WiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 829dbd1a99f..9c2b1155483 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -187,9 +187,7 @@ Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
if (keysSeen.count(key)) {
return Status(ErrorCodes::Error(50998),
str::stream() << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '"
- << key
- << "'.");
+ << "Found multiple instances of key '" << key << "'.");
}
keysSeen.insert(key);
@@ -265,9 +263,7 @@ StatusWith<int64_t> WiredTigerUtil::checkApplicationMetadataFormatVersion(Operat
if (version < minimumVersion || version > maximumVersion) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Application metadata for " << uri
- << " has unsupported format version: "
- << version
- << ".");
+ << " has unsupported format version: " << version << ".");
}
LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
@@ -320,8 +316,7 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
if (ret != 0) {
return StatusWith<uint64_t>(ErrorCodes::CursorNotFound,
str::stream() << "unable to open cursor at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ << ". reason: " << wiredtiger_strerror(ret));
}
invariant(cursor);
ON_BLOCK_EXIT([&] { cursor->close(cursor); });
@@ -329,21 +324,19 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
if (ret != 0) {
- return StatusWith<uint64_t>(
- ErrorCodes::NoSuchKey,
- str::stream() << "unable to find key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(ErrorCodes::NoSuchKey,
+ str::stream()
+ << "unable to find key " << statisticsKey << " at URI "
+ << uri << ". reason: " << wiredtiger_strerror(ret));
}
uint64_t value;
ret = cursor->get_value(cursor, NULL, NULL, &value);
if (ret != 0) {
- return StatusWith<uint64_t>(
- ErrorCodes::BadValue,
- str::stream() << "unable to get value for key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey
+ << " at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
return StatusWith<uint64_t>(value);
@@ -461,7 +454,7 @@ WT_EVENT_HANDLER defaultEventHandlers() {
handlers.handle_progress = mdb_handle_progress;
return handlers;
}
-}
+} // namespace
WiredTigerEventHandler::WiredTigerEventHandler() {
WT_EVENT_HANDLER* handler = static_cast<WT_EVENT_HANDLER*>(this);
@@ -577,8 +570,7 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u
// Sanity check against a table having multiple logging specifications.
invariant(false,
str::stream() << "Table has contradictory logging settings. Uri: " << uri
- << " Conf: "
- << existingMetadata);
+ << " Conf: " << existingMetadata);
}
if (existingMetadata.find(setting) != std::string::npos) {
@@ -617,8 +609,8 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
if (ret != 0) {
return Status(ErrorCodes::CursorNotFound,
- str::stream() << "unable to open cursor at URI " << uri << ". reason: "
- << wiredtiger_strerror(ret));
+ str::stream() << "unable to open cursor at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
bob->append("uri", uri);
invariant(c);
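
The getStatisticsValue() hunks above reflow errors around a WiredTiger statistics cursor. A minimal sketch of that open/set_key/search/get_value sequence, assuming wiredtiger.h and a valid WT_SESSION*; the real code guarantees the close via ON_BLOCK_EXIT, while this sketch closes inline:

#include <wiredtiger.h>
#include <cstdint>
#include <cstdio>

// Read one statistic from a statistics URI such as "statistics:table:example"
// (URI is illustrative). Mirrors the hunk above: statistics cursors take an
// int key, and get_value() ignores the description/printable slots here.
bool readStatistic(WT_SESSION* session, const char* uri, int statisticsKey,
                   uint64_t* out) {
    WT_CURSOR* cursor = nullptr;
    int ret = session->open_cursor(session, uri, nullptr, nullptr, &cursor);
    if (ret != 0) {
        std::fprintf(stderr, "unable to open cursor at URI %s. reason: %s\n",
                     uri, wiredtiger_strerror(ret));
        return false;
    }
    cursor->set_key(cursor, statisticsKey);
    ret = cursor->search(cursor);
    if (ret == 0)
        ret = cursor->get_value(cursor, nullptr, nullptr, out);
    cursor->close(cursor);
    return ret == 0;
}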
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index d6f6ebca0d2..28c27b67460 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -73,20 +73,16 @@ const NamespaceString sessionCollectionNamespace("config.system.sessions");
MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(
- AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
- v3SystemRolesKeyPattern = BSON(
- AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::USER_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
v3SystemRolesIndexName =
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
v3SystemUsersIndexSpec.addKeys(v3SystemUsersKeyPattern);
v3SystemUsersIndexSpec.unique();
diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp
index 18fa2baf7dd..b6de5022d0b 100644
--- a/src/mongo/db/traffic_reader.cpp
+++ b/src/mongo/db/traffic_reader.cpp
@@ -93,8 +93,8 @@ bool readBytes(size_t toRead, char* buf, int fd) {
auto pair = errnoAndDescription();
uassert(ErrorCodes::FileStreamFailed,
- str::stream() << "failed to read bytes: errno(" << pair.first << ") : "
- << pair.second,
+ str::stream() << "failed to read bytes: errno(" << pair.first
+ << ") : " << pair.second,
pair.first == EINTR);
continue;
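
readBytes() above retries only when read(2) is interrupted by a signal. A minimal standalone sketch of that EINTR retry loop (the original uasserts with the streamed errno message instead of returning false):

#include <unistd.h>
#include <cerrno>
#include <cstddef>

// Keep reading until `toRead` bytes arrive, retrying only on EINTR and
// treating any other errno, or EOF, as failure.
bool readBytesFully(int fd, char* buf, size_t toRead) {
    while (toRead > 0) {
        ssize_t n = ::read(fd, buf, toRead);
        if (n > 0) {
            buf += n;
            toRead -= static_cast<size_t>(n);
        } else if (n == 0) {
            return false;  // unexpected end of file
        } else if (errno != EINTR) {
            return false;  // real error; the original asserts here
        }
        // errno == EINTR: interrupted by a signal, retry the read.
    }
    return true;
}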
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 17f4756cce9..4252cc1cfb5 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -100,7 +100,7 @@ public:
}
void run() {
- _thread = stdx::thread([ consumer = std::move(_pcqPipe.consumer), this ] {
+ _thread = stdx::thread([consumer = std::move(_pcqPipe.consumer), this] {
try {
DataBuilder db;
std::fstream out(_path,
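
The traffic_recorder.cpp hunk above is the new clang-format layout for a lambda init-capture that moves the queue consumer into the recording thread. A minimal standalone sketch of the same ownership transfer, with std::thread and a std::unique_ptr standing in for stdx::thread and the pipe consumer:

#include <memory>
#include <string>
#include <thread>
#include <utility>

int main() {
    auto consumer = std::make_unique<std::string>("queue consumer");
    // Init-capture moves the single-owner resource into the lambda, so the
    // worker thread owns it outright for its lifetime.
    std::thread worker([consumer = std::move(consumer)] {
        // ... drain the queue via *consumer here ...
        (void)consumer;
    });
    worker.join();
    return 0;
}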
diff --git a/src/mongo/db/traffic_recorder_validators.cpp b/src/mongo/db/traffic_recorder_validators.cpp
index 918784563d2..c9c48501e8d 100644
--- a/src/mongo/db/traffic_recorder_validators.cpp
+++ b/src/mongo/db/traffic_recorder_validators.cpp
@@ -38,8 +38,8 @@ namespace mongo {
Status validateTrafficRecordDestination(const std::string& path) {
if (!path.empty() && !boost::filesystem::is_directory(path)) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "traffic recording directory \"" << path
- << "\" is not a directory.");
+ str::stream()
+ << "traffic recording directory \"" << path << "\" is not a directory.");
}
return Status::OK();
diff --git a/src/mongo/db/transaction_history_iterator.cpp b/src/mongo/db/transaction_history_iterator.cpp
index cbef1bafc4d..e9e57c1d0b8 100644
--- a/src/mongo/db/transaction_history_iterator.cpp
+++ b/src/mongo/db/transaction_history_iterator.cpp
@@ -96,8 +96,7 @@ BSONObj findOneOplogEntry(OperationContext* opCtx,
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toBSON()
- << " cannot be found",
+ << opTime.toBSON() << " cannot be found",
getNextResult != PlanExecutor::IS_EOF);
if (getNextResult != PlanExecutor::ADVANCED) {
uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(oplogBSON),
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 7fbbce22dd0..903d3a01b13 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -419,8 +419,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
TxnNumber txnNumber) {
uassert(ErrorCodes::NoSuchTransaction,
str::stream()
- << "Given transaction number "
- << txnNumber
+ << "Given transaction number " << txnNumber
<< " does not match any in-progress transactions. The active transaction number is "
<< o().activeTxnNumber,
txnNumber == o().activeTxnNumber && !o().txnState.isInRetryableWriteMode());
@@ -442,8 +441,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
uasserted(
ErrorCodes::NoSuchTransaction,
str::stream()
- << "Transaction "
- << txnNumber
+ << "Transaction " << txnNumber
<< " has been aborted because an earlier command in this transaction failed.");
}
return;
@@ -503,9 +501,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
uassert(ErrorCodes::TransactionTooOld,
str::stream() << "Cannot start transaction " << txnNumber << " on session "
- << _sessionId()
- << " because a newer transaction "
- << o().activeTxnNumber
+ << _sessionId() << " because a newer transaction " << o().activeTxnNumber
<< " has already started.",
txnNumber >= o().activeTxnNumber);
@@ -552,8 +548,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
TransactionState::kNone | TransactionState::kAbortedWithoutPrepare;
uassert(50911,
str::stream() << "Cannot start a transaction at given transaction number "
- << txnNumber
- << " a transaction with the same number is in state "
+ << txnNumber << " a transaction with the same number is in state "
<< o().txnState,
o().txnState.isInSet(restartableStates));
}
@@ -1087,8 +1082,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
uassert(ErrorCodes::OperationNotSupportedInTransaction,
str::stream() << "prepareTransaction failed because one of the transaction "
"operations was done against a temporary collection '"
- << collection->ns()
- << "'.",
+ << collection->ns() << "'.",
!collection->isTemporary(opCtx));
}
@@ -1394,8 +1388,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
str::stream() << "Commit oplog entry must be greater than or equal to commit "
"timestamp due to causal consistency. commit timestamp: "
<< commitTimestamp.toBSON()
- << ", commit oplog entry optime: "
- << commitOplogSlot.toBSON());
+ << ", commit oplog entry optime: " << commitOplogSlot.toBSON());
} else {
// We always expect a non-null commitOplogEntryOpTime to be passed in on secondaries
// in order to set the finishOpTime.
@@ -1852,8 +1845,7 @@ void TransactionParticipant::TransactionState::transitionTo(StateFlag newState,
if (shouldValidate == TransitionValidation::kValidateTransition) {
invariant(TransactionState::_isLegalTransition(_state, newState),
str::stream() << "Current state: " << toString(_state)
- << ", Illegal attempted next state: "
- << toString(newState));
+ << ", Illegal attempted next state: " << toString(newState));
}
// If we are transitioning out of prepare, signal waiters by fulfilling the completion promise.
@@ -2191,9 +2183,7 @@ boost::optional<repl::OpTime> TransactionParticipant::Participant::_checkStateme
if (it == p().activeTxnCommittedStatements.end()) {
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "Incomplete history detected for transaction "
- << o().activeTxnNumber
- << " on session "
- << _sessionId(),
+ << o().activeTxnNumber << " on session " << _sessionId(),
!p().hasIncompleteHistory);
return boost::none;
@@ -2217,45 +2207,45 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
OperationContext* opCtx,
std::vector<StmtId> stmtIdsWritten,
const repl::OpTime& lastStmtIdWriteOpTime) {
- opCtx->recoveryUnit()->onCommit(
- [ opCtx, stmtIdsWritten = std::move(stmtIdsWritten), lastStmtIdWriteOpTime ](
- boost::optional<Timestamp>) {
- TransactionParticipant::Participant participant(opCtx);
- invariant(participant.p().isValid);
-
- RetryableWritesStats::get(opCtx->getServiceContext())
- ->incrementTransactionsCollectionWriteCount();
-
- stdx::lock_guard<Client> lg(*opCtx->getClient());
-
- // The cache of the last written record must always be advanced after a write so that
- // subsequent writes have the correct point to start from.
- participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
-
- for (const auto stmtId : stmtIdsWritten) {
- if (stmtId == kIncompleteHistoryStmtId) {
- participant.p().hasIncompleteHistory = true;
- continue;
- }
-
- const auto insertRes = participant.p().activeTxnCommittedStatements.emplace(
- stmtId, lastStmtIdWriteOpTime);
- if (!insertRes.second) {
- const auto& existingOpTime = insertRes.first->second;
- fassertOnRepeatedExecution(participant._sessionId(),
- participant.o().activeTxnNumber,
- stmtId,
- existingOpTime,
- lastStmtIdWriteOpTime);
- }
+ opCtx->recoveryUnit()->onCommit([opCtx,
+ stmtIdsWritten = std::move(stmtIdsWritten),
+ lastStmtIdWriteOpTime](boost::optional<Timestamp>) {
+ TransactionParticipant::Participant participant(opCtx);
+ invariant(participant.p().isValid);
+
+ RetryableWritesStats::get(opCtx->getServiceContext())
+ ->incrementTransactionsCollectionWriteCount();
+
+ stdx::lock_guard<Client> lg(*opCtx->getClient());
+
+ // The cache of the last written record must always be advanced after a write so that
+ // subsequent writes have the correct point to start from.
+ participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
+
+ for (const auto stmtId : stmtIdsWritten) {
+ if (stmtId == kIncompleteHistoryStmtId) {
+ participant.p().hasIncompleteHistory = true;
+ continue;
}
- // If this is the first time executing a retryable write, we should indicate that to
- // the transaction participant.
- if (participant.o(lg).txnState.isNone()) {
- participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ const auto insertRes =
+ participant.p().activeTxnCommittedStatements.emplace(stmtId, lastStmtIdWriteOpTime);
+ if (!insertRes.second) {
+ const auto& existingOpTime = insertRes.first->second;
+ fassertOnRepeatedExecution(participant._sessionId(),
+ participant.o().activeTxnNumber,
+ stmtId,
+ existingOpTime,
+ lastStmtIdWriteOpTime);
}
- });
+ }
+
+ // If this is the first time executing a retryable write, we should indicate that to
+ // the transaction participant.
+ if (participant.o(lg).txnState.isNone()) {
+ participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ }
+ });
MONGO_FAIL_POINT_BLOCK(onPrimaryTransactionalWrite, customArgs) {
const auto& data = customArgs.getData();
@@ -2269,9 +2259,9 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
if (!failBeforeCommitExceptionElem.eoo()) {
const auto failureCode = ErrorCodes::Error(int(failBeforeCommitExceptionElem.Number()));
uasserted(failureCode,
- str::stream() << "Failing write for " << _sessionId() << ":"
- << o().activeTxnNumber
- << " due to failpoint. The write must not be reflected.");
+ str::stream()
+ << "Failing write for " << _sessionId() << ":" << o().activeTxnNumber
+ << " due to failpoint. The write must not be reflected.");
}
}
}
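
_registerUpdateCacheOnCommit(), reindented above, registers an onCommit hook that captures a moved std::vector of statement ids and runs only when the unit of work commits. A minimal standalone sketch of that register-then-run-at-commit shape; RecoveryUnitSketch is a hypothetical stand-in, not the server's RecoveryUnit:

#include <functional>
#include <utility>
#include <vector>

struct RecoveryUnitSketch {
    std::vector<std::function<void()>> onCommitHooks;
    // Hooks are queued now and invoked together at commit time.
    void onCommit(std::function<void()> fn) {
        onCommitHooks.push_back(std::move(fn));
    }
    void commit() {
        for (auto& fn : onCommitHooks)
            fn();
        onCommitHooks.clear();
    }
};

int main() {
    RecoveryUnitSketch ru;
    std::vector<int> stmtIdsWritten{1, 2, 3};  // stands in for the StmtIds
    ru.onCommit([stmtIdsWritten = std::move(stmtIdsWritten)] {
        // ... advance the per-session cache for each statement id here ...
        (void)stmtIdsWritten;
    });
    ru.commit();  // the hook fires here, never on rollback
    return 0;
}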
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index f4764ad0e8c..047310244f5 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -376,11 +376,11 @@ TEST_F(TxnParticipantTest, StashAndUnstashResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -1154,20 +1154,19 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr
auto guard = makeGuard([&]() { OperationContextSession::checkOut(opCtx()); });
// Try to start a new transaction while there is already a prepared transaction on the
// session. This should fail with a PreparedTransactionInProgress error.
- runFunctionFromDifferentOpCtx([
- lsid = *opCtx()->getLogicalSessionId(),
- txnNumberToStart = *opCtx()->getTxnNumber() + 1
- ](OperationContext * newOpCtx) {
- newOpCtx->setLogicalSessionId(lsid);
- newOpCtx->setTxnNumber(txnNumberToStart);
-
- MongoDOperationContextSession ocs(newOpCtx);
- auto txnParticipant = TransactionParticipant::get(newOpCtx);
- ASSERT_THROWS_CODE(
- txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
- AssertionException,
- ErrorCodes::PreparedTransactionInProgress);
- });
+ runFunctionFromDifferentOpCtx(
+ [lsid = *opCtx()->getLogicalSessionId(),
+ txnNumberToStart = *opCtx()->getTxnNumber() + 1](OperationContext* newOpCtx) {
+ newOpCtx->setLogicalSessionId(lsid);
+ newOpCtx->setTxnNumber(txnNumberToStart);
+
+ MongoDOperationContextSession ocs(newOpCtx);
+ auto txnParticipant = TransactionParticipant::get(newOpCtx);
+ ASSERT_THROWS_CODE(
+ txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
+ AssertionException,
+ ErrorCodes::PreparedTransactionInProgress);
+ });
}
ASSERT_FALSE(txnParticipant.transactionIsAborted());
@@ -1278,11 +1277,11 @@ TEST_F(TxnParticipantTest, StashInNestedSessionIsANoop) {
// Set the readConcern on the OperationContext.
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash, which sets up a WriteUnitOfWork.
@@ -2672,11 +2671,11 @@ TEST_F(TransactionsMetricsTest, ReportStashedResources) {
std::move(clientMetadata.getValue()));
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -2759,11 +2758,11 @@ TEST_F(TransactionsMetricsTest, ReportUnstashedResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -3108,11 +3107,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3148,11 +3147,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3190,11 +3189,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3231,11 +3230,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Prepare the transaction and extend the duration in the prepared state.
@@ -3269,11 +3268,11 @@ DEATH_TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogWithNoLockerInfoS
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3293,11 +3292,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3331,11 +3330,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3368,11 +3367,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3411,11 +3410,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3457,11 +3456,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterExceptionInPrepare) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3504,11 +3503,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowStashedAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3589,11 +3588,11 @@ TEST_F(TxnParticipantTest, RollbackResetsInMemoryStateOfPreparedTransaction) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
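
Every test hunk above reflows the same BSON() macro call without changing the object it builds. For reference, the reflowed expression

    BSON("find" << "test" << repl::ReadConcernArgs::kReadConcernFieldName
                << BSON(repl::ReadConcernArgs::kLevelFieldName << "snapshot"))

constructs the equivalent of {find: "test", readConcern: {level: "snapshot"}}, i.e. a find command on collection "test" with snapshot read concern (kReadConcernFieldName and kLevelFieldName are mongo's constants for "readConcern" and "level").
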
diff --git a/src/mongo/db/update/addtoset_node.cpp b/src/mongo/db/update/addtoset_node.cpp
index 4805ae5c825..b12c4ceeb9b 100644
--- a/src/mongo/db/update/addtoset_node.cpp
+++ b/src/mongo/db/update/addtoset_node.cpp
@@ -108,8 +108,7 @@ ModifierNode::ModifyResult AddToSetNode::updateExistingElement(
mutablebson::Element* element, std::shared_ptr<FieldRef> elementPath) const {
uassert(ErrorCodes::BadValue,
str::stream() << "Cannot apply $addToSet to non-array field. Field named '"
- << element->getFieldName()
- << "' has non-array type "
+ << element->getFieldName() << "' has non-array type "
<< typeName(element->getType()),
element->getType() == BSONType::Array);
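
Most of the remaining hunks in this commit likewise reflow chained << expressions inside str::stream() error messages; only line breaks move. For readers outside the codebase, here is a minimal stand-in for the idiom (mongo's real str::stream is a small helper in the server's utility headers; this sketch only mirrors the usage pattern):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Minimal stand-in for the str::stream idiom: accumulate pieces with
    // operator<<, then convert to std::string where one is expected.
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& value) {
            _ss << value;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };

    int main() {
        int activeTxnNumber = 7;
        std::string msg = stream() << "Failing write for session:" << activeTxnNumber
                                   << " due to failpoint. The write must not be reflected.";
        std::cout << msg << '\n';
    }
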
diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp
index 9c3bfc283a5..0aaf434fcdb 100644
--- a/src/mongo/db/update/addtoset_node_test.cpp
+++ b/src/mongo/db/update/addtoset_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using AddToSetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(AddToSetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$addToSet: {}}");
diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp
index 304b0261e90..58c0d4a27ab 100644
--- a/src/mongo/db/update/arithmetic_node.cpp
+++ b/src/mongo/db/update/arithmetic_node.cpp
@@ -55,9 +55,7 @@ Status ArithmeticNode::init(BSONElement modExpr,
if (!modExpr.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot " << getNameForOp(_op)
- << " with non-numeric argument: {"
- << modExpr
- << "}");
+ << " with non-numeric argument: {" << modExpr << "}");
}
_val = modExpr;
@@ -72,10 +70,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
str::stream() << "Cannot apply " << operatorName()
<< " to a value of non-numeric type. {"
<< (idElem.ok() ? idElem.toString() : "no id")
- << "} has the field '"
- << element->getFieldName()
- << "' of non-numeric type "
- << typeName(element->getType()));
+ << "} has the field '" << element->getFieldName()
+ << "' of non-numeric type " << typeName(element->getType()));
}
SafeNum originalValue = element->getValueSafeNum();
@@ -97,10 +93,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
auto idElem = mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Failed to apply " << operatorName()
- << " operations to current value ("
- << originalValue.debugString()
- << ") for document {"
- << (idElem.ok() ? idElem.toString() : "no id")
+ << " operations to current value (" << originalValue.debugString()
+ << ") for document {" << (idElem.ok() ? idElem.toString() : "no id")
<< "}");
} else {
invariant(element->setValueSafeNum(valueToSet));
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index d18cc4f1314..2783a32d547 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using ArithmeticNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(ArithmeticNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$inc: {}}");
diff --git a/src/mongo/db/update/bit_node.cpp b/src/mongo/db/update/bit_node.cpp
index 67a334970fc..19f7a560846 100644
--- a/src/mongo/db/update/bit_node.cpp
+++ b/src/mongo/db/update/bit_node.cpp
@@ -60,9 +60,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName
- << "' which is an unknown operator: {"
- << curOp
+ << payloadFieldName << "' which is an unknown operator: {" << curOp
<< "}");
}
@@ -70,9 +68,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type())
- << "' is not supported here: {"
- << curOp
+ << typeName(curOp.type()) << "' is not supported here: {" << curOp
<< "}");
}
@@ -97,11 +93,8 @@ ModifierNode::ModifyResult BitNode::updateExistingElement(
mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString()
- << " has the field "
- << element->getFieldName()
- << " of non-integer type "
- << typeName(element->getType()));
+ << idElem.toString() << " has the field " << element->getFieldName()
+ << " of non-integer type " << typeName(element->getType()));
}
SafeNum value = applyOpList(element->getValueSafeNum());
diff --git a/src/mongo/db/update/bit_node.h b/src/mongo/db/update/bit_node.h
index 96840fdec1e..91b7181a116 100644
--- a/src/mongo/db/update/bit_node.h
+++ b/src/mongo/db/update/bit_node.h
@@ -71,7 +71,7 @@ private:
BSONObjBuilder bob;
{
BSONObjBuilder subBuilder(bob.subobjStart(""));
- for (const auto[bitOperator, operand] : _opList) {
+ for (const auto [bitOperator, operand] : _opList) {
operand.toBSON(
[](SafeNum (SafeNum::*bitOperator)(const SafeNum&) const) {
if (bitOperator == &SafeNum::bitAnd)
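
The bit_node.h hunk above, and the update_array_node.h and update_object_node.cpp hunks further down, change only the whitespace around C++17 structured bindings (const auto & [ a, b ] becomes const auto& [a, b]). A self-contained example of the construct being reformatted:

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, int> opList{{"and", 0x0f}, {"or", 0xf0}};

        // Structured bindings (C++17): unpack each map entry into named parts.
        for (const auto& [op, operand] : opList) {
            std::cout << op << " -> " << operand << '\n';
        }

        // emplace() also pairs naturally with structured bindings:
        auto [it, inserted] = opList.emplace("xor", 0xff);
        std::cout << it->first << (inserted ? " inserted" : " already present") << '\n';
    }
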
diff --git a/src/mongo/db/update/bit_node_test.cpp b/src/mongo/db/update/bit_node_test.cpp
index 78734dd63dd..488ad971e5d 100644
--- a/src/mongo/db/update/bit_node_test.cpp
+++ b/src/mongo/db/update/bit_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using BitNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(BitNodeTest, InitWithDoubleFails) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -292,4 +292,4 @@ TEST_F(BitNodeTest, ApplyRepeatedBitOps) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp
index b500701cf2d..05c5d9ee68a 100644
--- a/src/mongo/db/update/compare_node_test.cpp
+++ b/src/mongo/db/update/compare_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using CompareNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(CompareNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$max: {}}");
diff --git a/src/mongo/db/update/current_date_node_test.cpp b/src/mongo/db/update/current_date_node_test.cpp
index 7bd11c9140b..e16a2cdbe46 100644
--- a/src/mongo/db/update/current_date_node_test.cpp
+++ b/src/mongo/db/update/current_date_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using CurrentDateNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(CurrentDateNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$currentDate: {}}");
@@ -286,4 +286,4 @@ TEST_F(CurrentDateNodeTest, ApplyNoIndexDataOrLogBuilder) {
}
} // namespace
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/update/field_checker_test.cpp b/src/mongo/db/update/field_checker_test.cpp
index 99deff9fb07..d95b2bc681f 100644
--- a/src/mongo/db/update/field_checker_test.cpp
+++ b/src/mongo/db/update/field_checker_test.cpp
@@ -38,9 +38,9 @@ namespace {
using mongo::ErrorCodes;
using mongo::FieldRef;
-using mongo::fieldchecker::isUpdatable;
-using mongo::fieldchecker::isPositional;
using mongo::Status;
+using mongo::fieldchecker::isPositional;
+using mongo::fieldchecker::isUpdatable;
TEST(IsUpdatable, Basics) {
FieldRef fieldRef("x");
diff --git a/src/mongo/db/update/log_builder.cpp b/src/mongo/db/update/log_builder.cpp
index e78cd295b1f..5fbd6514791 100644
--- a/src/mongo/db/update/log_builder.cpp
+++ b/src/mongo/db/update/log_builder.cpp
@@ -89,11 +89,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson:
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.getFieldName()
- << "' of type "
- << typeName(val.getType()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.getFieldName() << "' of type " << typeName(val.getType()));
return addToSets(elemToSet);
}
@@ -102,11 +100,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement&
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.fieldName()
- << "' of type "
- << typeName(val.type()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.fieldName() << "' of type " << typeName(val.type()));
return addToSets(elemToSet);
}
diff --git a/src/mongo/db/update/modifier_node.cpp b/src/mongo/db/update/modifier_node.cpp
index 674a2d8e361..dd0341255a3 100644
--- a/src/mongo/db/update/modifier_node.cpp
+++ b/src/mongo/db/update/modifier_node.cpp
@@ -66,10 +66,8 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
if (prefixSize == (*immutablePath)->numParts()) {
uasserted(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '" << pathTaken->dottedField() << "' to "
- << element.toString()
- << " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'");
+ << element.toString() << " would modify the immutable field '"
+ << (*immutablePath)->dottedField() << "'");
}
// If 'pathTaken' is a strict prefix of 'immutablePath', then we may have modified
@@ -106,8 +104,7 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the immutable field '"
<< (*immutablePath)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
@@ -137,8 +134,7 @@ void checkImmutablePathsNotModified(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "Performing an update on the path '" << pathTaken->dottedField()
<< "' would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
pathTaken->commonPrefixSize(**immutablePath) <
std::min(pathTaken->numParts(), (*immutablePath)->numParts()));
}
@@ -265,12 +261,10 @@ UpdateExecutor::ApplyResult ModifierNode::applyToNonexistentElement(
// because we just created this element.)
uassert(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '"
- << updateNodeApplyParams.pathTaken->dottedField()
- << "' to "
+ << updateNodeApplyParams.pathTaken->dottedField() << "' to "
<< applyParams.element.toString()
<< " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
updateNodeApplyParams.pathTaken->commonPrefixSize(**immutablePath) !=
(*immutablePath)->numParts());
}
diff --git a/src/mongo/db/update/object_replace_executor.cpp b/src/mongo/db/update/object_replace_executor.cpp
index 8a65cd1b0ca..31ea35df114 100644
--- a/src/mongo/db/update/object_replace_executor.cpp
+++ b/src/mongo/db/update/object_replace_executor.cpp
@@ -136,8 +136,7 @@ UpdateExecutor::ApplyResult ObjectReplaceExecutor::applyReplacementUpdate(
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the (immutable) field '"
<< (*path)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
diff --git a/src/mongo/db/update/object_replace_executor_test.cpp b/src/mongo/db/update/object_replace_executor_test.cpp
index cef054fd289..6b0d93f6e46 100644
--- a/src/mongo/db/update/object_replace_executor_test.cpp
+++ b/src/mongo/db/update/object_replace_executor_test.cpp
@@ -42,8 +42,8 @@ namespace mongo {
namespace {
using ObjectReplaceExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST_F(ObjectReplaceExecutorTest, Noop) {
auto obj = fromjson("{a: 1, b: 2}");
diff --git a/src/mongo/db/update/path_support.cpp b/src/mongo/db/update/path_support.cpp
index 6b9fc80a284..934e17e0006 100644
--- a/src/mongo/db/update/path_support.cpp
+++ b/src/mongo/db/update/path_support.cpp
@@ -53,8 +53,8 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
if (toPad > kMaxPaddingAllowed) {
return Status(ErrorCodes::CannotBackfillArray,
- str::stream() << "can't backfill more than " << kMaxPaddingAllowed
- << " elements");
+ str::stream()
+ << "can't backfill more than " << kMaxPaddingAllowed << " elements");
}
for (size_t i = 0; i < toPad; i++) {
@@ -128,10 +128,8 @@ Status findLongestPrefix(const FieldRef& prefix,
*elemFound = prev;
return Status(ErrorCodes::PathNotViable,
str::stream() << "cannot use the part (" << prefix.getPart(i - 1) << " of "
- << prefix.dottedField()
- << ") to traverse the element ({"
- << curr.toString()
- << "})");
+ << prefix.dottedField() << ") to traverse the element ({"
+ << curr.toString() << "})");
} else if (curr.ok()) {
*idxFound = i - 1;
*elemFound = curr;
@@ -153,9 +151,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (elemFound.getType() != BSONType::Object && elemFound.getType() != BSONType::Array) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
// Sanity check that 'idxField' is an actual part.
@@ -175,9 +171,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (!newIdx) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
status = maybePadTo(&elemFound, *newIdx);
diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp
index 78f721e558d..bed1be8cadb 100644
--- a/src/mongo/db/update/path_support_test.cpp
+++ b/src/mongo/db/update/path_support_test.cpp
@@ -58,10 +58,10 @@ namespace {
using namespace mongo;
using namespace pathsupport;
-using str::stream;
using mutablebson::Element;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
+using str::stream;
class EmptyDoc : public mongo::unittest::Test {
public:
@@ -607,9 +607,7 @@ static void assertContains(const EqualityMatches& equalities, const BSONObj& wra
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(it->second->getData() != value)) {
FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData()
- << ", not value "
- << value);
+ << it->second->getData() << ", not value " << value);
}
}
@@ -899,19 +897,14 @@ static void assertParent(const EqualityMatches& equalities,
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
if (foundParentPath != parentPath) {
FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \""
- << parentPath
- << "\"");
+ << "\" does not match \"" << parentPath << "\"");
}
BSONElementComparator eltCmp(BSONElementComparator::FieldNamesMode::kIgnore,
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(parentEl != value)) {
FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
- << "\" contains value "
- << parentEl
- << ", not value "
- << value);
+ << "\" contains value " << parentEl << ", not value " << value);
}
}
@@ -931,8 +924,7 @@ static void assertNoParent(const EqualityMatches& equalities, StringData pathStr
if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath
- << "\"");
+ << foundParentPath << "\"");
}
}
diff --git a/src/mongo/db/update/pipeline_executor_test.cpp b/src/mongo/db/update/pipeline_executor_test.cpp
index 2a10c292532..1c5c4297485 100644
--- a/src/mongo/db/update/pipeline_executor_test.cpp
+++ b/src/mongo/db/update/pipeline_executor_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PipelineExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST_F(PipelineExecutorTest, Noop) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
diff --git a/src/mongo/db/update/pop_node.cpp b/src/mongo/db/update/pop_node.cpp
index 35ff39204af..3d4355793f1 100644
--- a/src/mongo/db/update/pop_node.cpp
+++ b/src/mongo/db/update/pop_node.cpp
@@ -54,8 +54,7 @@ ModifierNode::ModifyResult PopNode::updateExistingElement(
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Path '" << elementPath->dottedField()
<< "' contains an element of non-array type '"
- << typeName(element->getType())
- << "'",
+ << typeName(element->getType()) << "'",
element->getType() == BSONType::Array);
if (!element->hasChildren()) {
diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp
index b9092a98927..39f41ba06f1 100644
--- a/src/mongo/db/update/pull_node_test.cpp
+++ b/src/mongo/db/update/pull_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PullNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PullNodeTest, InitWithBadMatchExpressionFails) {
auto update = fromjson("{$pull: {a: {b: {$foo: 1}}}}");
diff --git a/src/mongo/db/update/pullall_node.cpp b/src/mongo/db/update/pullall_node.cpp
index c082823657c..88f0e8bc6eb 100644
--- a/src/mongo/db/update/pullall_node.cpp
+++ b/src/mongo/db/update/pullall_node.cpp
@@ -48,7 +48,7 @@ public:
bool match(const mutablebson::ConstElement& element) final {
return std::any_of(_elementsToMatch.begin(),
_elementsToMatch.end(),
- [&element, collator{_collator} ](const auto& elementToMatch) {
+ [&element, collator{_collator}](const auto& elementToMatch) {
return element.compareWithBSONElement(
elementToMatch, collator, false) == 0;
});
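
The lambda above is another whitespace-only fix: collator{_collator} is a C++14 init-capture, a fresh variable initialized in the capture list (here copying the collator pointer into the closure). A small standalone example of the same construct:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> elementsToMatch{1, 2, 3};
        int target = 2;

        // Init-capture (C++14): 'needle' is a new variable created in the
        // capture list, copying 'target' the way the node copies _collator.
        bool found = std::any_of(elementsToMatch.begin(),
                                 elementsToMatch.end(),
                                 [needle{target}](int candidate) { return candidate == needle; });
        std::cout << std::boolalpha << found << '\n';
    }
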
diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp
index 60b09e7b77d..dd77b411dcf 100644
--- a/src/mongo/db/update/pullall_node_test.cpp
+++ b/src/mongo/db/update/pullall_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PullAllNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PullAllNodeTest, InitWithIntFails) {
auto update = fromjson("{$pullAll: {a: 1}}");
diff --git a/src/mongo/db/update/push_node.cpp b/src/mongo/db/update/push_node.cpp
index 6702af4fec3..a4a79fb6e5a 100644
--- a/src/mongo/db/update/push_node.cpp
+++ b/src/mongo/db/update/push_node.cpp
@@ -292,10 +292,8 @@ ModifierNode::ModifyResult PushNode::performPush(mutablebson::Element* element,
uasserted(ErrorCodes::BadValue,
str::stream() << "The field '" << elementPath->dottedField() << "'"
<< " must be an array but is of type "
- << typeName(element->getType())
- << " in document {"
- << (idElem.ok() ? idElem.toString() : "no id")
- << "}");
+ << typeName(element->getType()) << " in document {"
+ << (idElem.ok() ? idElem.toString() : "no id") << "}");
}
auto result = insertElementsWithPosition(element, _position, _valuesToPush);
diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp
index d0ef73e22e5..985ee81ca2c 100644
--- a/src/mongo/db/update/push_node_test.cpp
+++ b/src/mongo/db/update/push_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PushNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PushNodeTest, EachClauseWithNonArrayObjectFails) {
auto update = fromjson("{$push: {x: {$each: {'0': 1}}}}");
@@ -670,12 +670,9 @@ void checkDocumentAndResult(BSONObj updateModifier,
FAIL(str::stream() << "apply() failure for " << updateModifier << ". Expected "
<< expectedDocument
<< " (noop = false, indexesAffected = false) but got "
- << actualDocument.toString()
- << " (noop = "
- << (applyResult.noop ? "true" : "false")
- << ", indexesAffected = "
- << (applyResult.indexesAffected ? "true" : "false")
- << ").");
+ << actualDocument.toString() << " (noop = "
+ << (applyResult.noop ? "true" : "false") << ", indexesAffected = "
+ << (applyResult.indexesAffected ? "true" : "false") << ").");
}
}
@@ -828,9 +825,7 @@ TEST_F(PushNodeTest, ApplyToPopulatedArrayWithSortAndSliceValues) {
auto update =
BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(BSON("a" << 2 << "b" << 1)
<< BSON("a" << 1 << "b" << 1))
- << "$slice"
- << data.sliceValue
- << "$sort"
+ << "$slice" << data.sliceValue << "$sort"
<< data.sortOrder)));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
PushNode node;
diff --git a/src/mongo/db/update/rename_node.cpp b/src/mongo/db/update/rename_node.cpp
index 4177a5f446d..fe9e12650ef 100644
--- a/src/mongo/db/update/rename_node.cpp
+++ b/src/mongo/db/update/rename_node.cpp
@@ -133,8 +133,8 @@ Status RenameNode::init(BSONElement modExpr,
// Though we could treat this as a no-op, it is illegal in the current implementation.
if (fromFieldRef == toFieldRef) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The source and target field for $rename must differ: "
- << modExpr);
+ str::stream()
+ << "The source and target field for $rename must differ: " << modExpr);
}
if (fromFieldRef.isPrefixOf(toFieldRef) || toFieldRef.isPrefixOf(fromFieldRef)) {
@@ -203,12 +203,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
- << fromFieldRef->dottedField()
- << "' in doc with "
+ << fromFieldRef->dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
@@ -225,12 +223,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The destination field cannot be an array element, '"
- << toFieldRef.dottedField()
- << "' in doc with "
+ << toFieldRef.dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
diff --git a/src/mongo/db/update/rename_node_test.cpp b/src/mongo/db/update/rename_node_test.cpp
index 93ddfd61714..6eec4d8f498 100644
--- a/src/mongo/db/update/rename_node_test.cpp
+++ b/src/mongo/db/update/rename_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using RenameNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(RenameNodeTest, PositionalNotAllowedInFromField) {
auto update = fromjson("{$rename: {'a.$': 'b'}}");
@@ -476,8 +476,7 @@ TEST_F(RenameNodeTest, ApplyCanRemoveRequiredPartOfDBRefIfValidateForStorageIsFa
ASSERT_TRUE(result.indexesAffected);
auto updated = BSON("a" << BSON("$ref"
<< "c")
- << "b"
- << 0);
+ << "b" << 0);
ASSERT_EQUALS(updated, doc);
ASSERT_FALSE(doc.isInPlaceModeEnabled());
ASSERT_EQUALS(fromjson("{$set: {'b': 0}, $unset: {'a.$id': true}}"), getLogDoc());
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index f7280e83110..8f160c4fe13 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using SetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(SetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$set: {}}");
diff --git a/src/mongo/db/update/storage_validation.cpp b/src/mongo/db/update/storage_validation.cpp
index ce5147f42e2..009343776f0 100644
--- a/src/mongo/db/update/storage_validation.cpp
+++ b/src/mongo/db/update/storage_validation.cpp
@@ -104,8 +104,7 @@ void validateDollarPrefixElement(mutablebson::ConstElement elem) {
// Not an okay, $ prefixed field name.
uasserted(ErrorCodes::DollarPrefixedFieldName,
str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
- << "' in '"
- << mutablebson::getFullName(elem)
+ << "' in '" << mutablebson::getFullName(elem)
<< "' is not valid for storage.");
}
}
diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp
index 346c5e4551c..09788ef573b 100644
--- a/src/mongo/db/update/unset_node_test.cpp
+++ b/src/mongo/db/update/unset_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using UnsetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(UnsetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$unset: {}}");
diff --git a/src/mongo/db/update/update_array_node.h b/src/mongo/db/update/update_array_node.h
index 0c0ec5550d8..67af11a6cd3 100644
--- a/src/mongo/db/update/update_array_node.h
+++ b/src/mongo/db/update/update_array_node.h
@@ -86,7 +86,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath,
toArrayFilterIdentifier(pathSuffix));
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index 3f2024dba1c..0349deb4e8f 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -76,26 +76,21 @@ modifiertable::ModifierType validateMod(BSONElement mod) {
uassert(
ErrorCodes::FailedToParse,
str::stream()
- << "Unknown modifier: "
- << mod.fieldName()
+ << "Unknown modifier: " << mod.fieldName()
<< ". Expected a valid update modifier or pipeline-style update specified as an array",
modType != modifiertable::MOD_UNKNOWN);
uassert(ErrorCodes::FailedToParse,
str::stream() << "Modifiers operate on fields but we found type "
- << typeName(mod.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {"
- << mod
- << "}",
+ << typeName(mod.type()) << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {" << mod << "}",
mod.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
str::stream() << "'" << mod.fieldName()
<< "' is empty. You must specify a field like so: "
"{"
- << mod.fieldName()
- << ": {<field>: ...}}",
+ << mod.fieldName() << ": {<field>: ...}}",
!mod.embeddedObject().isEmpty());
return modType;
@@ -134,8 +129,7 @@ bool parseUpdateExpression(
for (const auto& arrayFilter : arrayFilters) {
uassert(ErrorCodes::FailedToParse,
str::stream() << "The array filter for identifier '" << arrayFilter.first
- << "' was not used in the update "
- << updateExpr,
+ << "' was not used in the update " << updateExpr,
foundIdentifiers.find(arrayFilter.first.toString()) != foundIdentifiers.end());
}
diff --git a/src/mongo/db/update/update_leaf_node.cpp b/src/mongo/db/update/update_leaf_node.cpp
index 5d1f8931b53..b09919772a2 100644
--- a/src/mongo/db/update/update_leaf_node.cpp
+++ b/src/mongo/db/update/update_leaf_node.cpp
@@ -52,13 +52,9 @@ void UpdateLeafNode::checkViability(mutablebson::Element element,
} else {
uasserted(ErrorCodes::PathNotViable,
str::stream() << "Cannot use the part (" << pathToCreate.getPart(0) << ") of ("
- << pathTaken.dottedField()
- << "."
- << pathToCreate.dottedField()
- << ") to traverse the element ({"
- << element.toString()
- << "})");
+ << pathTaken.dottedField() << "." << pathToCreate.dottedField()
+ << ") to traverse the element ({" << element.toString() << "})");
}
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/update/update_object_node.cpp b/src/mongo/db/update/update_object_node.cpp
index 3ca3a85f797..4686d2895f7 100644
--- a/src/mongo/db/update/update_object_node.cpp
+++ b/src/mongo/db/update/update_object_node.cpp
@@ -61,8 +61,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot have array filter identifier (i.e. '$[<id>]') "
"element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
auto identifier = field.substr(2, field.size() - 3);
@@ -70,9 +69,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
if (!identifier.empty() && arrayFilters.find(identifier) == arrayFilters.end()) {
return Status(ErrorCodes::BadValue,
str::stream() << "No array filter found for identifier '" << identifier
- << "' in path '"
- << fieldRef.dottedField()
- << "'");
+ << "' in path '" << fieldRef.dottedField() << "'");
}
if (!identifier.empty()) {
@@ -189,7 +186,7 @@ void applyChild(const UpdateNode& child,
BSONObj makeBSONForOperator(const std::vector<std::pair<std::string, BSONObj>>& updatesForOp) {
BSONObjBuilder bob;
- for (const auto & [ path, value ] : updatesForOp)
+ for (const auto& [path, value] : updatesForOp)
bob << path << value.firstElement();
return bob.obj();
}
@@ -227,8 +224,8 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
// be a string value.
if (BSONType::String != modExpr.type()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The 'to' field for $rename must be a string: "
- << modExpr);
+ str::stream()
+ << "The 'to' field for $rename must be a string: " << modExpr);
}
fieldRef.parse(modExpr.valueStringData());
@@ -249,8 +246,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (positional && positionalCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
if (positional && positionalIndex == 0) {
@@ -258,8 +254,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
ErrorCodes::BadValue,
str::stream()
<< "Cannot have positional (i.e. '$') element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
// Construct and initialize the leaf node.
@@ -297,8 +292,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
return Status(ErrorCodes::ConflictingUpdateOperators,
str::stream() << "Updating the path '" << fieldRef.dottedField()
<< "' would create a conflict at '"
- << fieldRef.dottedSubstring(0, i + 1)
- << "'");
+ << fieldRef.dottedSubstring(0, i + 1) << "'");
}
} else {
std::unique_ptr<UpdateInternalNode> ownedChild;
@@ -334,10 +328,9 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (current->getChild(childName)) {
return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Updating the path '" << fieldRef.dottedField()
- << "' would create a conflict at '"
- << fieldRef.dottedField()
- << "'");
+ str::stream()
+ << "Updating the path '" << fieldRef.dottedField()
+ << "' would create a conflict at '" << fieldRef.dottedField() << "'");
}
current->setChild(std::move(childName), std::move(leaf));
@@ -388,12 +381,12 @@ BSONObj UpdateObjectNode::serialize() const {
BSONObjBuilder bob;
- for (const auto & [ pathPrefix, child ] : _children) {
+ for (const auto& [pathPrefix, child] : _children) {
auto path = FieldRef(pathPrefix);
child->produceSerializationMap(&path, &operatorOrientedUpdates);
}
- for (const auto & [ op, updates ] : operatorOrientedUpdates)
+ for (const auto& [op, updates] : operatorOrientedUpdates)
bob << op << makeBSONForOperator(updates);
return bob.obj();
diff --git a/src/mongo/db/update/update_object_node.h b/src/mongo/db/update/update_object_node.h
index 5cbae91f1a5..cbb462da152 100644
--- a/src/mongo/db/update/update_object_node.h
+++ b/src/mongo/db/update/update_object_node.h
@@ -111,7 +111,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath, pathSuffix);
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
}
diff --git a/src/mongo/db/update/update_serialization_test.cpp b/src/mongo/db/update/update_serialization_test.cpp
index 046efec9825..89ae2ac03c4 100644
--- a/src/mongo/db/update/update_serialization_test.cpp
+++ b/src/mongo/db/update/update_serialization_test.cpp
@@ -248,4 +248,4 @@ TEST(UpdateSerialization, CompoundStatementsSerialize) {
}
} // namespace
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 539fcc27b67..8aad16e5552 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -112,4 +112,4 @@ FieldRef UpdateIndexData::getCanonicalIndexField(const FieldRef& path) {
return buf;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.h b/src/mongo/db/update_index_data.h
index aee2c968742..9477eab10e1 100644
--- a/src/mongo/db/update_index_data.h
+++ b/src/mongo/db/update_index_data.h
@@ -83,4 +83,4 @@ private:
bool _allPathsIndexed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data_test.cpp b/src/mongo/db/update_index_data_test.cpp
index c55f0235d09..ae230e70f30 100644
--- a/src/mongo/db/update_index_data_test.cpp
+++ b/src/mongo/db/update_index_data_test.cpp
@@ -129,4 +129,4 @@ TEST(UpdateIndexDataTest, CanonicalIndexFieldForNestedNumericFieldNames) {
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.0.b.1.2")), FieldRef("a.b"_sd));
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.01.02.b.c")), FieldRef("a"_sd));
}
-}
+} // namespace mongo
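
The last few hunks, like several earlier ones, only add or correct the closing-namespace comment that the project's formatting rules require. The convention is:

    namespace mongo {
    namespace {
    // file-local helpers and tests live here
    }  // namespace
    }  // namespace mongo
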
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 193f7a6f432..3d969308c9a 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -170,9 +170,7 @@ BSONObj DurableViewCatalogImpl::_validateViewDefinition(OperationContext* opCtx,
uassert(ErrorCodes::InvalidViewDefinition,
str::stream() << "found invalid view definition " << viewDefinition["_id"]
- << " while reading '"
- << _db->getSystemViewsName()
- << "'",
+ << " while reading '" << _db->getSystemViewsName() << "'",
valid);
return viewDefinition;
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index b15ccab582d..a4b5111419a 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -57,9 +57,8 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmpty
AggregationRequest requestOnView{viewNss, emptyPipeline};
auto result = resolvedView.asExpandedViewAggregation(requestOnView);
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument);
+ BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray()
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -72,8 +71,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipel
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
- << "cursor"
- << kDefaultCursorOptionDocument);
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -216,9 +214,8 @@ TEST(ResolvedViewTest, FromBSONFailsOnInvalidPipelineType) {
}
TEST(ResolvedViewTest, FromBSONFailsOnInvalidCollationType) {
- BSONObj badCmdResponse =
- BSON("resolvedView" << BSON(
- "ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation" << 1));
+ BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation" << 1));
ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40639);
}
@@ -234,10 +231,10 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesEmptyBSONArrayIntoEmptyVector)
}
TEST(ResolvedViewTest, FromBSONSuccessfullyParsesCollation) {
- BSONObj cmdResponse = BSON(
- "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation"
- << BSON("locale"
- << "fil")));
+ BSONObj cmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation"
+ << BSON("locale"
+ << "fil")));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
ASSERT(std::equal(emptyPipeline.begin(),
@@ -257,8 +254,7 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesPopulatedBSONArrayIntoVector) {
BSONArray pipeline = BSON_ARRAY(matchStage << sortStage << limitStage);
BSONObj cmdResponse = BSON("resolvedView" << BSON("ns"
<< "testdb.testcoll"
- << "pipeline"
- << pipeline));
+ << "pipeline" << pipeline));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
@@ -274,8 +270,7 @@ TEST(ResolvedViewTest, IsResolvedViewErrorResponseDetectsKickbackErrorCodeSucces
BSONObj errorResponse =
BSON("ok" << 0 << "code" << ErrorCodes::CommandOnShardedViewNotSupportedOnMongod << "errmsg"
<< "This view is sharded and cannot be run on mongod"
- << "resolvedView"
- << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
+ << "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
auto status = getStatusFromCommandResult(errorResponse);
ASSERT_EQ(status, ErrorCodes::CommandOnShardedViewNotSupportedOnMongod);
ASSERT(status.extraInfo<ResolvedView>());
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 237a9495cf2..6019a012b1a 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -115,8 +115,7 @@ Status ViewCatalog::_reload(WithLock,
return Status(ErrorCodes::InvalidViewDefinition,
str::stream() << "View 'pipeline' entries must be objects, but "
<< viewName.toString()
- << " has a pipeline element of type "
- << stage.type());
+ << " has a pipeline element of type " << stage.type());
}
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 78412ada08d..94d3d011c22 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -257,8 +257,7 @@ TEST_F(ViewCatalogFixture, CanCreateViewWithLookupUsingPipelineSyntax) {
<< "fcoll"
<< "as"
<< "as"
- << "pipeline"
- << BSONArray()))),
+ << "pipeline" << BSONArray()))),
emptyCollation));
}
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index 7ecc1544e31..def5d50154d 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -110,8 +110,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "Operation would result in a resolved view pipeline that exceeds "
"the maximum size of "
- << kMaxViewPipelineSizeBytes
- << " bytes"};
+ << kMaxViewPipelineSizeBytes << " bytes"};
}
guard.dismiss();
@@ -217,8 +216,7 @@ Status ViewGraph::_validateParents(uint64_t currentId, int currentDepth, StatsMa
if (size > kMaxViewPipelineSizeBytes) {
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "View pipeline is too large and exceeds the maximum size of "
- << ViewGraph::kMaxViewPipelineSizeBytes
- << " bytes"};
+ << ViewGraph::kMaxViewPipelineSizeBytes << " bytes"};
}
return Status::OK();
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index a2cc613282d..ca87ea7d50c 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -51,9 +51,9 @@
namespace mongo {
-using std::string;
using repl::OpTime;
using repl::OpTimeAndWallTime;
+using std::string;
static TimerStats gleWtimeStats;
static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime", &gleWtimeStats);