Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/base/clonable_ptr.h  5
-rw-r--r--  src/mongo/base/concept/assignable.h  8
-rw-r--r--  src/mongo/base/concept/clonable.h  20
-rw-r--r--  src/mongo/base/concept/clone_factory.h  22
-rw-r--r--  src/mongo/base/concept/constructible.h  49
-rw-r--r--  src/mongo/base/concept/convertible_to.h  16
-rw-r--r--  src/mongo/base/concept/copy_assignable.h  20
-rw-r--r--  src/mongo/base/concept/copy_constructible.h  16
-rw-r--r--  src/mongo/base/concept/unique_ptr.h  50
-rw-r--r--  src/mongo/base/data_type_validated_test.cpp  2
-rw-r--r--  src/mongo/base/encoded_value_storage_test.cpp  2
-rw-r--r--  src/mongo/base/global_initializer_registerer.h  72
-rw-r--r--  src/mongo/base/initializer.h  16
-rw-r--r--  src/mongo/base/initializer_function.h  10
-rw-r--r--  src/mongo/base/parse_number_test.cpp  13
-rw-r--r--  src/mongo/base/unwind_test.cpp  15
-rw-r--r--  src/mongo/bson/bson_obj_test.cpp  5
-rw-r--r--  src/mongo/bson/bson_validate_test.cpp  45
-rw-r--r--  src/mongo/bson/bsonelement.cpp  14
-rw-r--r--  src/mongo/bson/bsonelement.h  5
-rw-r--r--  src/mongo/bson/bsonelement_test.cpp  25
-rw-r--r--  src/mongo/bson/bsonmisc.h  2
-rw-r--r--  src/mongo/bson/bsonobj.cpp  4
-rw-r--r--  src/mongo/bson/bsonobj.h  12
-rw-r--r--  src/mongo/bson/bsonobjbuilder.h  2
-rw-r--r--  src/mongo/bson/bsonobjbuilder_test.cpp  5
-rw-r--r--  src/mongo/bson/json.cpp  4
-rw-r--r--  src/mongo/bson/oid_test.cpp  2
-rw-r--r--  src/mongo/bson/ordering.h  2
-rw-r--r--  src/mongo/bson/timestamp.cpp  2
-rw-r--r--  src/mongo/bson/ugly_bson_integration_test.cpp  5
-rw-r--r--  src/mongo/bson/util/bson_check.h  13
-rw-r--r--  src/mongo/bson/util/bson_check_test.cpp  13
-rw-r--r--  src/mongo/bson/util/bson_extract.cpp  25
-rw-r--r--  src/mongo/bson/util/bson_extract_test.cpp  6
-rw-r--r--  src/mongo/bson/util/builder_test.cpp  2
-rw-r--r--  src/mongo/client/authenticate.cpp  28
-rw-r--r--  src/mongo/client/authenticate_test.cpp  12
-rw-r--r--  src/mongo/client/connection_string_connect.cpp  2
-rw-r--r--  src/mongo/client/constants.h  2
-rw-r--r--  src/mongo/client/cyrus_sasl_client_session.cpp  5
-rw-r--r--  src/mongo/client/dbclient_base.cpp  25
-rw-r--r--  src/mongo/client/dbclient_base.h  8
-rw-r--r--  src/mongo/client/dbclient_connection.cpp  18
-rw-r--r--  src/mongo/client/dbclient_cursor.cpp  4
-rw-r--r--  src/mongo/client/dbclient_cursor_test.cpp  2
-rw-r--r--  src/mongo/client/dbclient_rs.cpp  49
-rw-r--r--  src/mongo/client/dbclient_rs.h  6
-rw-r--r--  src/mongo/client/fetcher.cpp  46
-rw-r--r--  src/mongo/client/fetcher_test.cpp  160
-rw-r--r--  src/mongo/client/mongo_uri.cpp  17
-rw-r--r--  src/mongo/client/mongo_uri_test.cpp  28
-rw-r--r--  src/mongo/client/native_sasl_client_session.cpp  2
-rw-r--r--  src/mongo/client/query_spec.h  6
-rw-r--r--  src/mongo/client/read_preference.cpp  27
-rw-r--r--  src/mongo/client/read_preference_test.cpp  24
-rw-r--r--  src/mongo/client/remote_command_retry_scheduler_test.cpp  3
-rw-r--r--  src/mongo/client/replica_set_monitor.cpp  39
-rw-r--r--  src/mongo/client/replica_set_monitor_internal_test.cpp  156
-rw-r--r--  src/mongo/client/replica_set_monitor_manager.cpp  8
-rw-r--r--  src/mongo/client/replica_set_monitor_scan_test.cpp  513
-rw-r--r--  src/mongo/client/sasl_client_authenticate.h  2
-rw-r--r--  src/mongo/client/sasl_client_authenticate_impl.cpp  2
-rw-r--r--  src/mongo/client/sasl_scram_client_conversation.cpp  9
-rw-r--r--  src/mongo/client/sasl_sspi.cpp  9
-rw-r--r--  src/mongo/crypto/aead_encryption.cpp  27
-rw-r--r--  src/mongo/crypto/hash_block.h  10
-rw-r--r--  src/mongo/crypto/mechanism_scram.h  9
-rw-r--r--  src/mongo/crypto/symmetric_crypto_apple.cpp  4
-rw-r--r--  src/mongo/crypto/symmetric_crypto_openssl.cpp  9
-rw-r--r--  src/mongo/db/auth/authorization_manager_impl.cpp  3
-rw-r--r--  src/mongo/db/auth/authorization_manager_test.cpp  30
-rw-r--r--  src/mongo/db/auth/authorization_session_impl.cpp  21
-rw-r--r--  src/mongo/db/auth/authorization_session_test.cpp  103
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.cpp  28
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_mock.cpp  3
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_s.cpp  55
-rw-r--r--  src/mongo/db/auth/privilege_parser_test.cpp  27
-rw-r--r--  src/mongo/db/auth/role_graph.cpp  42
-rw-r--r--  src/mongo/db/auth/role_graph_test.cpp  63
-rw-r--r--  src/mongo/db/auth/role_graph_update.cpp  21
-rw-r--r--  src/mongo/db/auth/sasl_authentication_session_test.cpp  24
-rw-r--r--  src/mongo/db/auth/sasl_mechanism_registry.cpp  7
-rw-r--r--  src/mongo/db/auth/sasl_mechanism_registry_test.cpp  11
-rw-r--r--  src/mongo/db/auth/sasl_options_init.cpp  2
-rw-r--r--  src/mongo/db/auth/sasl_plain_server_conversation.cpp  5
-rw-r--r--  src/mongo/db/auth/sasl_plain_server_conversation.h  5
-rw-r--r--  src/mongo/db/auth/sasl_scram_server_conversation.cpp  15
-rw-r--r--  src/mongo/db/auth/sasl_scram_test.cpp  17
-rw-r--r--  src/mongo/db/auth/security_file.cpp  4
-rw-r--r--  src/mongo/db/auth/user.cpp  2
-rw-r--r--  src/mongo/db/auth/user_document_parser.cpp  8
-rw-r--r--  src/mongo/db/auth/user_document_parser_test.cpp  102
-rw-r--r--  src/mongo/db/auth/user_management_commands_parser.cpp  15
-rw-r--r--  src/mongo/db/baton.cpp  2
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp  14
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp  7
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp  17
-rw-r--r--  src/mongo/db/catalog/collection_catalog.h  5
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp  2
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp  4
-rw-r--r--  src/mongo/db/catalog/collection_compact.h  6
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp  37
-rw-r--r--  src/mongo/db/catalog/collection_options.cpp  8
-rw-r--r--  src/mongo/db/catalog/collection_options.h  2
-rw-r--r--  src/mongo/db/catalog/collection_validation.cpp  6
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp  9
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp  8
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  22
-rw-r--r--  src/mongo/db/catalog/database_test.cpp  73
-rw-r--r--  src/mongo/db/catalog/document_validation.h  2
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp  25
-rw-r--r--  src/mongo/db/catalog/drop_database_test.cpp  8
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp  33
-rw-r--r--  src/mongo/db/catalog/health_log.cpp  4
-rw-r--r--  src/mongo/db/catalog/health_log.h  2
-rw-r--r--  src/mongo/db/catalog/index_build_block.cpp  20
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp  3
-rw-r--r--  src/mongo/db/catalog/index_builds_manager_test.cpp  3
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp  12
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp  81
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp  3
-rw-r--r--  src/mongo/db/catalog/index_key_validate.cpp  74
-rw-r--r--  src/mongo/db/catalog/index_key_validate_test.cpp  6
-rw-r--r--  src/mongo/db/catalog/index_spec_validate_test.cpp  379
-rw-r--r--  src/mongo/db/catalog/index_timestamp_helper.h  4
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp  17
-rw-r--r--  src/mongo/db/catalog/record_store_validate_adaptor.cpp  20
-rw-r--r--  src/mongo/db/catalog/record_store_validate_adaptor.h  2
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  29
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp  37
-rw-r--r--  src/mongo/db/catalog/util/partitioned.h  2
-rw-r--r--  src/mongo/db/catalog/util/partitioned_test.cpp  1
-rw-r--r--  src/mongo/db/catalog_raii.cpp  6
-rw-r--r--  src/mongo/db/client.cpp  4
-rw-r--r--  src/mongo/db/clientcursor.cpp  2
-rw-r--r--  src/mongo/db/cloner.cpp  46
-rw-r--r--  src/mongo/db/collection_index_usage_tracker.cpp  2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp  3
-rw-r--r--  src/mongo/db/commands/compact.cpp  2
-rw-r--r--  src/mongo/db/commands/connection_status.cpp  2
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  33
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp  4
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp  4
-rw-r--r--  src/mongo/db/commands/dbcommands_d.cpp  8
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  9
-rw-r--r--  src/mongo/db/commands/driverHelpers.cpp  2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp  2
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp  3
-rw-r--r--  src/mongo/db/commands/fail_point_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp  9
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version_command_parser.cpp  20
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version_documentation.h  4
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version_parser.cpp  75
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp  10
-rw-r--r--  src/mongo/db/commands/fsync.cpp  4
-rw-r--r--  src/mongo/db/commands/fsync_locked.h  10
-rw-r--r--  src/mongo/db/commands/generic_servers.cpp  4
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  22
-rw-r--r--  src/mongo/db/commands/hashcmd.cpp  2
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp  2
-rw-r--r--  src/mongo/db/commands/list_databases.cpp  2
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp  4
-rw-r--r--  src/mongo/db/commands/lock_info.cpp  2
-rw-r--r--  src/mongo/db/commands/mr.cpp  22
-rw-r--r--  src/mongo/db/commands/mr.h  16
-rw-r--r--  src/mongo/db/commands/mr_common.cpp  4
-rw-r--r--  src/mongo/db/commands/mr_test.cpp  6
-rw-r--r--  src/mongo/db/commands/parameters.cpp  17
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp  2
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp  25
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp  2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  4
-rw-r--r--  src/mongo/db/commands/server_status_internal.cpp  2
-rw-r--r--  src/mongo/db/commands/server_status_internal.h  2
-rw-r--r--  src/mongo/db/commands/server_status_metric.cpp  2
-rw-r--r--  src/mongo/db/commands/server_status_metric.h  2
-rw-r--r--  src/mongo/db/commands/sleep_command.cpp  2
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp  2
-rw-r--r--  src/mongo/db/commands/test_commands.cpp  2
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp  80
-rw-r--r--  src/mongo/db/commands/user_management_commands_common.cpp  35
-rw-r--r--  src/mongo/db/commands/validate.cpp  2
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp  2
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_bm.cpp  4
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp  2
-rw-r--r--  src/mongo/db/concurrency/lock_manager.cpp  5
-rw-r--r--  src/mongo/db/concurrency/lock_manager.h  52
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  12
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp  11
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.cpp  12
-rw-r--r--  src/mongo/db/curop.cpp  9
-rw-r--r--  src/mongo/db/curop_failpoint_helpers.cpp  2
-rw-r--r--  src/mongo/db/curop_failpoint_helpers.h  2
-rw-r--r--  src/mongo/db/db.cpp  4
-rw-r--r--  src/mongo/db/db_raii.cpp  3
-rw-r--r--  src/mongo/db/dbdirectclient.cpp  2
-rw-r--r--  src/mongo/db/dbhelpers.cpp  2
-rw-r--r--  src/mongo/db/dbmessage.cpp  4
-rw-r--r--  src/mongo/db/dbmessage.h  6
-rw-r--r--  src/mongo/db/dbmessage_test.cpp  2
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp  2
-rw-r--r--  src/mongo/db/exec/change_stream_proxy.cpp  3
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  6
-rw-r--r--  src/mongo/db/exec/count_scan.cpp  2
-rw-r--r--  src/mongo/db/exec/geo_near.cpp  18
-rw-r--r--  src/mongo/db/exec/queued_data_stage_test.cpp  2
-rw-r--r--  src/mongo/db/exec/record_store_fast_count.h  2
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.cpp  3
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp  10
-rw-r--r--  src/mongo/db/exec/text_or.cpp  2
-rw-r--r--  src/mongo/db/exec/update_stage.cpp  3
-rw-r--r--  src/mongo/db/exec/write_stage_common.h  4
-rw-r--r--  src/mongo/db/exhaust_cursor_currentop_integration_test.cpp  14
-rw-r--r--  src/mongo/db/field_parser_test.cpp  76
-rw-r--r--  src/mongo/db/field_ref_set.cpp  4
-rw-r--r--  src/mongo/db/free_mon/free_mon_controller.h  32
-rw-r--r--  src/mongo/db/free_mon/free_mon_controller_test.cpp  201
-rw-r--r--  src/mongo/db/free_mon/free_mon_message.h  24
-rw-r--r--  src/mongo/db/free_mon/free_mon_mongod.cpp  31
-rw-r--r--  src/mongo/db/free_mon/free_mon_op_observer.cpp  5
-rw-r--r--  src/mongo/db/free_mon/free_mon_options.h  4
-rw-r--r--  src/mongo/db/free_mon/free_mon_processor.cpp  60
-rw-r--r--  src/mongo/db/free_mon/free_mon_queue_test.cpp  2
-rw-r--r--  src/mongo/db/ftdc/compressor_test.cpp  223
-rw-r--r--  src/mongo/db/ftdc/controller.h  16
-rw-r--r--  src/mongo/db/ftdc/controller_test.cpp  4
-rw-r--r--  src/mongo/db/ftdc/file_manager.cpp  10
-rw-r--r--  src/mongo/db/ftdc/file_manager_test.cpp  92
-rw-r--r--  src/mongo/db/ftdc/file_reader.cpp  3
-rw-r--r--  src/mongo/db/ftdc/file_writer.cpp  3
-rw-r--r--  src/mongo/db/ftdc/file_writer_test.cpp  86
-rw-r--r--  src/mongo/db/ftdc/ftdc_server.cpp  1
-rw-r--r--  src/mongo/db/ftdc/ftdc_system_stats.h  1
-rw-r--r--  src/mongo/db/ftdc/ftdc_system_stats_linux.cpp  5
-rw-r--r--  src/mongo/db/ftdc/util.cpp  4
-rw-r--r--  src/mongo/db/ftdc/util.h  24
-rw-r--r--  src/mongo/db/ftdc/varint.h  4
-rw-r--r--  src/mongo/db/fts/fts_element_iterator.cpp  9
-rw-r--r--  src/mongo/db/fts/fts_index_format.cpp  8
-rw-r--r--  src/mongo/db/fts/fts_index_format.h  4
-rw-r--r--  src/mongo/db/fts/fts_index_format_test.cpp  18
-rw-r--r--  src/mongo/db/fts/fts_language.cpp  14
-rw-r--r--  src/mongo/db/fts/fts_language.h  4
-rw-r--r--  src/mongo/db/fts/fts_language_test.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_matcher.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_matcher.h  4
-rw-r--r--  src/mongo/db/fts/fts_matcher_test.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_query_impl.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_query_impl.h  4
-rw-r--r--  src/mongo/db/fts/fts_query_impl_test.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_query_parser.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_query_parser.h  4
-rw-r--r--  src/mongo/db/fts/fts_spec.cpp  22
-rw-r--r--  src/mongo/db/fts/fts_spec_legacy.cpp  10
-rw-r--r--  src/mongo/db/fts/fts_spec_test.cpp  22
-rw-r--r--  src/mongo/db/fts/fts_util.cpp  4
-rw-r--r--  src/mongo/db/fts/fts_util.h  4
-rw-r--r--  src/mongo/db/fts/stemmer.cpp  4
-rw-r--r--  src/mongo/db/fts/stemmer.h  4
-rw-r--r--  src/mongo/db/fts/stemmer_test.cpp  4
-rw-r--r--  src/mongo/db/fts/stop_words.cpp  6
-rw-r--r--  src/mongo/db/fts/stop_words.h  4
-rw-r--r--  src/mongo/db/fts/stop_words_test.cpp  4
-rw-r--r--  src/mongo/db/fts/tokenizer.cpp  4
-rw-r--r--  src/mongo/db/fts/tokenizer.h  4
-rw-r--r--  src/mongo/db/fts/tokenizer_test.cpp  4
-rw-r--r--  src/mongo/db/fts/unicode/string.cpp  2
-rw-r--r--  src/mongo/db/fts/unicode/string_test.cpp  2
-rw-r--r--  src/mongo/db/geo/big_polygon.cpp  2
-rw-r--r--  src/mongo/db/geo/big_polygon.h  2
-rw-r--r--  src/mongo/db/geo/big_polygon_test.cpp  169
-rw-r--r--  src/mongo/db/geo/geoparser.cpp  3
-rw-r--r--  src/mongo/db/geo/geoparser_test.cpp  2
-rw-r--r--  src/mongo/db/geo/hash.cpp  13
-rw-r--r--  src/mongo/db/geo/hash_test.cpp  2
-rw-r--r--  src/mongo/db/geo/r2_region_coverer.cpp  2
-rw-r--r--  src/mongo/db/geo/shapes.h  5
-rw-r--r--  src/mongo/db/hasher.h  2
-rw-r--r--  src/mongo/db/hasher_test.cpp  3
-rw-r--r--  src/mongo/db/index/btree_key_generator.cpp  4
-rw-r--r--  src/mongo/db/index/btree_key_generator_test.cpp  2
-rw-r--r--  src/mongo/db/index/expression_params.cpp  10
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp  10
-rw-r--r--  src/mongo/db/index/index_build_interceptor.h  6
-rw-r--r--  src/mongo/db/index/index_descriptor.cpp  5
-rw-r--r--  src/mongo/db/index/s2_access_method.cpp  36
-rw-r--r--  src/mongo/db/index/s2_key_generator_test.cpp  21
-rw-r--r--  src/mongo/db/index/sort_key_generator_test.cpp  3
-rw-r--r--  src/mongo/db/index_builder.h  2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp  42
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp  10
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod_test.cpp  3
-rw-r--r--  src/mongo/db/initialize_server_global_state.cpp  10
-rw-r--r--  src/mongo/db/initialize_server_security_state.cpp  4
-rw-r--r--  src/mongo/db/introspect.cpp  2
-rw-r--r--  src/mongo/db/keypattern.cpp  3
-rw-r--r--  src/mongo/db/keypattern_test.cpp  2
-rw-r--r--  src/mongo/db/keys_collection_cache.cpp  6
-rw-r--r--  src/mongo/db/keys_collection_client.h  4
-rw-r--r--  src/mongo/db/keys_collection_client_direct.h  4
-rw-r--r--  src/mongo/db/keys_collection_client_sharded.h  4
-rw-r--r--  src/mongo/db/lasterror.cpp  2
-rw-r--r--  src/mongo/db/log_process_details.cpp  2
-rw-r--r--  src/mongo/db/logical_clock.cpp  5
-rw-r--r--  src/mongo/db/logical_session_cache_test.cpp  5
-rw-r--r--  src/mongo/db/logical_session_id_test.cpp  48
-rw-r--r--  src/mongo/db/logical_time_test.cpp  10
-rw-r--r--  src/mongo/db/matcher/expression.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression.h  2
-rw-r--r--  src/mongo/db/matcher/expression_array.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_array.h  6
-rw-r--r--  src/mongo/db/matcher/expression_geo.cpp  21
-rw-r--r--  src/mongo/db/matcher/expression_geo_test.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_leaf.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_leaf.h  2
-rw-r--r--  src/mongo/db/matcher/expression_leaf_test.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_parser.cpp  45
-rw-r--r--  src/mongo/db/matcher/expression_parser_array_test.cpp  154
-rw-r--r--  src/mongo/db/matcher/expression_parser_leaf_test.cpp  163
-rw-r--r--  src/mongo/db/matcher/expression_parser_test.cpp  9
-rw-r--r--  src/mongo/db/matcher/expression_parser_tree_test.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_text.cpp  6
-rw-r--r--  src/mongo/db/matcher/expression_text_base.cpp  6
-rw-r--r--  src/mongo/db/matcher/expression_tree.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_tree.h  2
-rw-r--r--  src/mongo/db/matcher/expression_tree_test.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_type_test.cpp  4
-rw-r--r--  src/mongo/db/matcher/expression_where.cpp  4
-rw-r--r--  src/mongo/db/matcher/expression_where_noop.cpp  2
-rw-r--r--  src/mongo/db/matcher/expression_with_placeholder.cpp  11
-rw-r--r--  src/mongo/db/matcher/match_details.cpp  2
-rw-r--r--  src/mongo/db/matcher/match_details.h  2
-rw-r--r--  src/mongo/db/matcher/matchable.cpp  2
-rw-r--r--  src/mongo/db/matcher/matchable.h  4
-rw-r--r--  src/mongo/db/matcher/path.cpp  2
-rw-r--r--  src/mongo/db/matcher/path.h  2
-rw-r--r--  src/mongo/db/matcher/path_accepting_keyword_test.cpp  45
-rw-r--r--  src/mongo/db/matcher/path_test.cpp  2
-rw-r--r--  src/mongo/db/matcher/schema/expression_internal_schema_max_length.h  4
-rw-r--r--  src/mongo/db/matcher/schema/expression_internal_schema_min_length.h  4
-rw-r--r--  src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp  3
-rw-r--r--  src/mongo/db/matcher/schema/json_pointer_test.cpp  10
-rw-r--r--  src/mongo/db/matcher/schema/json_schema_parser.cpp  151
-rw-r--r--  src/mongo/db/mongod_options.cpp  9
-rw-r--r--  src/mongo/db/mongod_options.h  2
-rw-r--r--  src/mongo/db/multi_key_path_tracker.cpp  4
-rw-r--r--  src/mongo/db/multi_key_path_tracker_test.cpp  3
-rw-r--r--  src/mongo/db/namespace_string.cpp  4
-rw-r--r--  src/mongo/db/op_observer_impl.cpp  10
-rw-r--r--  src/mongo/db/op_observer_impl_test.cpp  528
-rw-r--r--  src/mongo/db/op_observer_util.h  2
-rw-r--r--  src/mongo/db/operation_time_tracker.cpp  2
-rw-r--r--  src/mongo/db/ops/delete.h  2
-rw-r--r--  src/mongo/db/ops/insert.cpp  14
-rw-r--r--  src/mongo/db/ops/insert.h  2
-rw-r--r--  src/mongo/db/ops/update.cpp  3
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  8
-rw-r--r--  src/mongo/db/ops/write_ops_parsers.cpp  9
-rw-r--r--  src/mongo/db/ops/write_ops_parsers_test.cpp  80
-rw-r--r--  src/mongo/db/ops/write_ops_retryability.cpp  41
-rw-r--r--  src/mongo/db/ops/write_ops_retryability_test.cpp  45
-rw-r--r--  src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp  18
-rw-r--r--  src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator.h  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_avg.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_first.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_last.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_merge_objects.cpp  3
-rw-r--r--  src/mongo/db/pipeline/accumulator_min_max.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_push.cpp  2
-rw-r--r--  src/mongo/db/pipeline/accumulator_std_dev.cpp  2
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.cpp  16
-rw-r--r--  src/mongo/db/pipeline/dependencies.cpp  2
-rw-r--r--  src/mongo/db/pipeline/dependencies.h  2
-rw-r--r--  src/mongo/db/pipeline/dependencies_test.cpp  3
-rw-r--r--  src/mongo/db/pipeline/document.cpp  5
-rw-r--r--  src/mongo/db/pipeline/document.h  2
-rw-r--r--  src/mongo/db/pipeline/document_internal.h  2
-rw-r--r--  src/mongo/db/pipeline/document_source_add_fields.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket.cpp  39
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket_auto_test.cpp  4
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream.cpp  18
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream_test.cpp  29
-rw-r--r--  src/mongo/db/pipeline/document_source_coll_stats.cpp  21
-rw-r--r--  src/mongo/db/pipeline/document_source_current_op.cpp  8
-rw-r--r--  src/mongo/db/pipeline/document_source_current_op.h  3
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_exchange.cpp  12
-rw-r--r--  src/mongo/db/pipeline/document_source_exchange_test.cpp  84
-rw-r--r--  src/mongo/db/pipeline/document_source_facet.cpp  10
-rw-r--r--  src/mongo/db/pipeline/document_source_graph_lookup.cpp  25
-rw-r--r--  src/mongo/db/pipeline/document_source_graph_lookup_test.cpp  6
-rw-r--r--  src/mongo/db/pipeline/document_source_group_test.cpp  32
-rw-r--r--  src/mongo/db/pipeline/document_source_index_stats.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h  2
-rw-r--r--  src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp  8
-rw-r--r--  src/mongo/db/pipeline/document_source_internal_split_pipeline.h  2
-rw-r--r--  src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp  3
-rw-r--r--  src/mongo/db/pipeline/document_source_list_cached_and_active_users.h  3
-rw-r--r--  src/mongo/db/pipeline/document_source_list_local_sessions.h  3
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.cpp  13
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp  17
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup_test.cpp  137
-rw-r--r--  src/mongo/db/pipeline/document_source_match.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_merge.cpp  8
-rw-r--r--  src/mongo/db/pipeline/document_source_merge.h  2
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_cursors_test.cpp  4
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_test.cpp  199
-rw-r--r--  src/mongo/db/pipeline/document_source_mock.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_out.cpp  8
-rw-r--r--  src/mongo/db/pipeline/document_source_plan_cache_stats.cpp  12
-rw-r--r--  src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp  3
-rw-r--r--  src/mongo/db/pipeline/document_source_queue.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_redact.cpp  5
-rw-r--r--  src/mongo/db/pipeline/document_source_replace_root.cpp  9
-rw-r--r--  src/mongo/db/pipeline/document_source_replace_root_test.cpp  6
-rw-r--r--  src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp  6
-rw-r--r--  src/mongo/db/pipeline/document_source_sequential_document_cache.cpp  8
-rw-r--r--  src/mongo/db/pipeline/document_source_sequential_document_cache.h  2
-rw-r--r--  src/mongo/db/pipeline/document_source_skip.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_unwind.cpp  2
-rw-r--r--  src/mongo/db/pipeline/document_source_unwind_test.cpp  23
-rw-r--r--  src/mongo/db/pipeline/document_source_writer.h  2
-rw-r--r--  src/mongo/db/pipeline/document_value_test.cpp  30
-rw-r--r--  src/mongo/db/pipeline/expression.cpp  281
-rw-r--r--  src/mongo/db/pipeline/expression.h  22
-rw-r--r--  src/mongo/db/pipeline/expression_convert_test.cpp  40
-rw-r--r--  src/mongo/db/pipeline/expression_date_test.cpp  129
-rw-r--r--  src/mongo/db/pipeline/expression_test.cpp  200
-rw-r--r--  src/mongo/db/pipeline/expression_trigonometric.h  8
-rw-r--r--  src/mongo/db/pipeline/expression_trigonometric_test.cpp  2
-rw-r--r--  src/mongo/db/pipeline/field_path.cpp  2
-rw-r--r--  src/mongo/db/pipeline/field_path.h  2
-rw-r--r--  src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp  140
-rw-r--r--  src/mongo/db/pipeline/lite_parsed_document_source.cpp  2
-rw-r--r--  src/mongo/db/pipeline/lite_parsed_pipeline.cpp  3
-rw-r--r--  src/mongo/db/pipeline/lookup_set_cache.h  4
-rw-r--r--  src/mongo/db/pipeline/mongos_process_interface.cpp  9
-rw-r--r--  src/mongo/db/pipeline/parsed_aggregation_projection.cpp  41
-rw-r--r--  src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp  38
-rw-r--r--  src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp  21
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp  6
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp  23
-rw-r--r--  src/mongo/db/pipeline/pipeline_metadata_tree.h  17
-rw-r--r--  src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp  15
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone.cpp  27
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone_test.cpp  2
-rw-r--r--  src/mongo/db/pipeline/resume_token.cpp  5
-rw-r--r--  src/mongo/db/pipeline/resume_token_test.cpp  4
-rw-r--r--  src/mongo/db/pipeline/semantic_analysis.cpp  4
-rw-r--r--  src/mongo/db/pipeline/sharded_agg_helpers.cpp  14
-rw-r--r--  src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp  8
-rw-r--r--  src/mongo/db/pipeline/value.cpp  14
-rw-r--r--  src/mongo/db/pipeline/value.h  4
-rw-r--r--  src/mongo/db/pipeline/variables.cpp  10
-rw-r--r--  src/mongo/db/pipeline/variables.h  2
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.cpp  20
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.h  4
-rw-r--r--  src/mongo/db/query/collation/collation_index_key.cpp  6
-rw-r--r--  src/mongo/db/query/collation/collation_index_key_test.cpp  3
-rw-r--r--  src/mongo/db/query/collation/collation_spec_test.cpp  144
-rw-r--r--  src/mongo/db/query/collation/collator_factory_icu.cpp  155
-rw-r--r--  src/mongo/db/query/collation/collator_factory_icu_decoration.cpp  4
-rw-r--r--  src/mongo/db/query/collation/collator_factory_icu_test.cpp  176
-rw-r--r--  src/mongo/db/query/collation/collator_interface_mock_test.cpp  10
-rw-r--r--  src/mongo/db/query/count_command_test.cpp  79
-rw-r--r--  src/mongo/db/query/cursor_response.cpp  18
-rw-r--r--  src/mongo/db/query/cursor_response_test.cpp  150
-rw-r--r--  src/mongo/db/query/datetime/date_time_support.cpp  10
-rw-r--r--  src/mongo/db/query/datetime/date_time_support.h  3
-rw-r--r--  src/mongo/db/query/datetime/init_timezone_data.cpp  3
-rw-r--r--  src/mongo/db/query/explain.h  2
-rw-r--r--  src/mongo/db/query/explain_options.cpp  11
-rw-r--r--  src/mongo/db/query/find.cpp  3
-rw-r--r--  src/mongo/db/query/find_and_modify_request.cpp  18
-rw-r--r--  src/mongo/db/query/find_and_modify_request.h  14
-rw-r--r--  src/mongo/db/query/get_executor.cpp  14
-rw-r--r--  src/mongo/db/query/get_executor_test.cpp  15
-rw-r--r--  src/mongo/db/query/getmore_request.cpp  11
-rw-r--r--  src/mongo/db/query/getmore_request_test.cpp  45
-rw-r--r--  src/mongo/db/query/killcursors_request.cpp  4
-rw-r--r--  src/mongo/db/query/killcursors_request_test.cpp  21
-rw-r--r--  src/mongo/db/query/killcursors_response.cpp  4
-rw-r--r--  src/mongo/db/query/killcursors_response_test.cpp  42
-rw-r--r--  src/mongo/db/query/parsed_distinct.cpp  18
-rw-r--r--  src/mongo/db/query/parsed_distinct_test.cpp  50
-rw-r--r--  src/mongo/db/query/parsed_projection.cpp  8
-rw-r--r--  src/mongo/db/query/parsed_projection_test.cpp  5
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.cpp  2
-rw-r--r--  src/mongo/db/query/plan_cache_indexability_test.cpp  4
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp  3
-rw-r--r--  src/mongo/db/query/plan_enumerator.cpp  14
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp  2
-rw-r--r--  src/mongo/db/query/planner_ixselect.cpp  15
-rw-r--r--  src/mongo/db/query/planner_ixselect_test.cpp  10
-rw-r--r--  src/mongo/db/query/query_planner.cpp  15
-rw-r--r--  src/mongo/db/query/query_planner_geo_test.cpp  80
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp  17
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp  4
-rw-r--r--  src/mongo/db/query/query_planner_text_test.cpp  113
-rw-r--r--  src/mongo/db/query/query_planner_wildcard_index_test.cpp  3
-rw-r--r--  src/mongo/db/query/query_request.cpp  20
-rw-r--r--  src/mongo/db/query/query_request_test.cpp  2
-rw-r--r--  src/mongo/db/query/query_settings_test.cpp  4
-rw-r--r--  src/mongo/db/query/query_solution.cpp  2
-rw-r--r--  src/mongo/db/query/query_solution_test.cpp  3
-rw-r--r--  src/mongo/db/query/stage_builder.cpp  7
-rw-r--r--  src/mongo/db/read_concern.h  2
-rw-r--r--  src/mongo/db/read_concern_mongod.cpp  12
-rw-r--r--  src/mongo/db/read_concern_test.cpp  4
-rw-r--r--  src/mongo/db/repair_database.cpp  7
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp  20
-rw-r--r--  src/mongo/db/repl/abstract_async_component.cpp  13
-rw-r--r--  src/mongo/db/repl/abstract_async_component.h  3
-rw-r--r--  src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp  2
-rw-r--r--  src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h  2
-rw-r--r--  src/mongo/db/repl/applier_helpers.cpp  3
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp  11
-rw-r--r--  src/mongo/db/repl/apply_ops.h  2
-rw-r--r--  src/mongo/db/repl/apply_ops_test.cpp  83
-rw-r--r--  src/mongo/db/repl/base_cloner_test_fixture.cpp  3
-rw-r--r--  src/mongo/db/repl/bgsync.h  22
-rw-r--r--  src/mongo/db/repl/check_quorum_for_config_change.cpp  5
-rw-r--r--  src/mongo/db/repl/check_quorum_for_config_change_test.cpp  241
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp  65
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp  11
-rw-r--r--  src/mongo/db/repl/collection_cloner_test.cpp  68
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp  43
-rw-r--r--  src/mongo/db/repl/database_cloner_test.cpp  111
-rw-r--r--  src/mongo/db/repl/databases_cloner_test.cpp  68
-rw-r--r--  src/mongo/db/repl/dbcheck.cpp  32
-rw-r--r--  src/mongo/db/repl/dbcheck.h  4
-rw-r--r--  src/mongo/db/repl/dbcheck_idl.h  2
-rw-r--r--  src/mongo/db/repl/drop_pending_collection_reaper_test.cpp  2
-rw-r--r--  src/mongo/db/repl/election_reason_counter.h  2
-rw-r--r--  src/mongo/db/repl/idempotency_test_fixture.cpp  7
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp  28
-rw-r--r--  src/mongo/db/repl/initial_syncer_test.cpp  92
-rw-r--r--  src/mongo/db/repl/is_master_response.cpp  27
-rw-r--r--  src/mongo/db/repl/isself.cpp  3
-rw-r--r--  src/mongo/db/repl/member_config.cpp  12
-rw-r--r--  src/mongo/db/repl/member_config_test.cpp  180
-rw-r--r--  src/mongo/db/repl/member_data.cpp  5
-rw-r--r--  src/mongo/db/repl/mock_repl_coord_server_fixture.h  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp  13
-rw-r--r--  src/mongo/db/repl/oplog_buffer_collection.cpp  9
-rw-r--r--  src/mongo/db/repl/oplog_buffer_collection_test.cpp  41
-rw-r--r--  src/mongo/db/repl/oplog_entry.cpp  5
-rw-r--r--  src/mongo/db/repl/oplog_entry.h  42
-rw-r--r--  src/mongo/db/repl/oplog_fetcher.cpp  28
-rw-r--r--  src/mongo/db/repl/oplog_interface_mock.cpp  3
-rw-r--r--  src/mongo/db/repl/oplog_test.cpp  6
-rw-r--r--  src/mongo/db/repl/optime_extract_test.cpp  3
-rw-r--r--  src/mongo/db/repl/read_concern_args.cpp  38
-rw-r--r--  src/mongo/db/repl/read_concern_args_test.cpp  323
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp  12
-rw-r--r--  src/mongo/db/repl/repl_set_config.cpp  112
-rw-r--r--  src/mongo/db/repl/repl_set_config_checks.cpp  47
-rw-r--r--  src/mongo/db/repl/repl_set_config_checks_test.cpp  432
-rw-r--r--  src/mongo/db/repl/repl_set_config_test.cpp  1272
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp  7
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_response.cpp  15
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_response_test.cpp  128
-rw-r--r--  src/mongo/db/repl/replication_consistency_markers_impl.cpp  21
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h  12
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp  22
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.h  10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  12
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h  2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp  371
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp  10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp  111
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp  131
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp  1489
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.cpp  30
-rw-r--r--  src/mongo/db/repl/replication_info.cpp  5
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp  3
-rw-r--r--  src/mongo/db/repl/replication_recovery_test.cpp  8
-rw-r--r--  src/mongo/db/repl/reporter_test.cpp  9
-rw-r--r--  src/mongo/db/repl/roll_back_local_operations.cpp  20
-rw-r--r--  src/mongo/db/repl/roll_back_local_operations_test.cpp  31
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp  39
-rw-r--r--  src/mongo/db/repl/rollback_impl.h  2
-rw-r--r--  src/mongo/db/repl/rollback_impl_test.cpp  92
-rw-r--r--  src/mongo/db/repl/rollback_source_impl.cpp  4
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.cpp  7
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp  51
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp  633
-rw-r--r--  src/mongo/db/repl/split_horizon_test.cpp  3
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp  42
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp  42
-rw-r--r--  src/mongo/db/repl/storage_interface_mock.h  9
-rw-r--r--  src/mongo/db/repl/sync_source_resolver.cpp  22
-rw-r--r--  src/mongo/db/repl/sync_source_selector.h  2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp  34
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp  170
-rw-r--r--  src/mongo/db/repl/task_runner.cpp  1
-rw-r--r--  src/mongo/db/repl/topology_coordinator.cpp  43
-rw-r--r--  src/mongo/db/repl/topology_coordinator.h  2
-rw-r--r--  src/mongo/db/repl/topology_coordinator_v1_test.cpp  1206
-rw-r--r--  src/mongo/db/repl/vote_requester_test.cpp  90
-rw-r--r--  src/mongo/db/repl_index_build_state.h  4
-rw-r--r--  src/mongo/db/s/active_migrations_registry.cpp  9
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry.cpp  4
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry.h  2
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry_test.cpp  2
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.cpp  6
-rw-r--r--  src/mongo/db/s/add_shard_util.cpp  2
-rw-r--r--  src/mongo/db/s/add_shard_util.h  2
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp  12
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp  6
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp  5
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp  16
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp  12
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp  9
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp  10
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp  6
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp  3
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp  9
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp  3
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp  18
-rw-r--r--  src/mongo/db/s/collection_range_deleter.h  16
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp  3
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp  23
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp  2
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp  7
-rw-r--r--  src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp  3
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp  4
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp  25
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp  12
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp  3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp  122
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp  55
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp  11
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp  8
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp  16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp  50
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp  8
-rw-r--r--  src/mongo/db/s/config_server_op_observer_test.cpp  2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp  45
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp  12
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  23
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp  8
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  37
-rw-r--r--  src/mongo/db/s/migration_session_id.cpp  4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  44
-rw-r--r--  src/mongo/db/s/migration_util.cpp  2
-rw-r--r--  src/mongo/db/s/migration_util.h  2
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp  4
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp  3
-rw-r--r--  src/mongo/db/s/scoped_operation_completion_sharding_actions.h  2
-rw-r--r--  src/mongo/db/s/session_catalog_migration_destination.cpp  37
-rw-r--r--  src/mongo/db/s/session_catalog_migration_source.cpp  8
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp  16
-rw-r--r--  src/mongo/db/s/shard_key_util.cpp  7
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp  7
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp  71
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp  5
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp  66
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod_test.cpp  45
-rw-r--r--  src/mongo/db/s/sharding_logging.cpp  14
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp  57
-rw-r--r--  src/mongo/db/s/split_chunk.cpp  18
-rw-r--r--  src/mongo/db/s/transaction_coordinator.cpp  7
-rw-r--r--  src/mongo/db/s/transaction_coordinator_catalog.cpp  4
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.cpp  10
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.h  43
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util_test.cpp  6
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service.cpp  2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_structures_test.cpp  3
-rw-r--r--  src/mongo/db/s/transaction_coordinator_test.cpp  65
-rw-r--r--  src/mongo/db/s/transaction_coordinator_util.cpp  45
-rw-r--r--  src/mongo/db/s/txn_two_phase_commit_cmds.cpp  11
-rw-r--r--  src/mongo/db/s/type_shard_identity_test.cpp  21
-rw-r--r--  src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp  4
-rw-r--r--  src/mongo/db/server_options.h  18
-rw-r--r--  src/mongo/db/server_options_helpers.h  28
-rw-r--r--  src/mongo/db/service_context_test_fixture.h  6
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp  37
-rw-r--r--  src/mongo/db/session_catalog_mongod.cpp  14
-rw-r--r--  src/mongo/db/session_catalog_test.cpp  42
-rw-r--r--  src/mongo/db/sessions_collection_config_server.h  18
-rw-r--r--  src/mongo/db/sorter/sorter.cpp  40
-rw-r--r--  src/mongo/db/sorter/sorter.h  2
-rw-r--r--  src/mongo/db/startup_warnings_common.cpp  6
-rw-r--r--  src/mongo/db/startup_warnings_mongod.cpp  14
-rw-r--r--  src/mongo/db/stats/counters.cpp  2
-rw-r--r--  src/mongo/db/stats/counters.h  2
-rw-r--r--  src/mongo/db/stats/fine_clock.h  2
-rw-r--r--  src/mongo/db/stats/timer_stats.cpp  2
-rw-r--r--  src/mongo/db/stats/timer_stats.h  2
-rw-r--r--  src/mongo/db/storage/biggie/biggie_record_store.cpp  5
-rw-r--r--  src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp  8
-rw-r--r--  src/mongo/db/storage/biggie/store.h  8
-rw-r--r--  src/mongo/db/storage/biggie/store_test.cpp  4
-rw-r--r--  src/mongo/db/storage/bson_collection_catalog_entry.cpp  2
-rw-r--r--  src/mongo/db/storage/bson_collection_catalog_entry.h  2
-rw-r--r--  src/mongo/db/storage/capped_callback.h  2
-rw-r--r--  src/mongo/db/storage/devnull/devnull_kv_engine.h  2
-rw-r--r--  src/mongo/db/storage/durable_catalog_impl.cpp  4
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp  2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h  2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp  6
-rw-r--r--  src/mongo/db/storage/journal_listener.h  2
-rw-r--r--  src/mongo/db/storage/key_string.cpp  20
-rw-r--r--  src/mongo/db/storage/key_string_test.cpp  7
-rw-r--r--  src/mongo/db/storage/key_string_to_bson_fuzzer.cpp  2
-rw-r--r--  src/mongo/db/storage/kv/durable_catalog_test.cpp  3
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h  2
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_test_harness.cpp  9
-rw-r--r--  src/mongo/db/storage/kv/kv_prefix.cpp  2
-rw-r--r--  src/mongo/db/storage/kv/kv_prefix.h  2
-rw-r--r--  src/mongo/db/storage/kv/temporary_kv_record_store.h  3
-rw-r--r--  src/mongo/db/storage/mobile/mobile_session_pool.h  4
-rw-r--r--  src/mongo/db/storage/record_store.h  2
-rw-r--r--  src/mongo/db/storage/record_store_test_harness.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_randomiter.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_recorditer.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_recordstore.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_repairiter.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_storagesize.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_touch.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_truncate.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_updaterecord.cpp  2
-rw-r--r--  src/mongo/db/storage/record_store_test_updatewithdamages.cpp  2
-rw-r--r--  src/mongo/db/storage/remove_saver.cpp  2
-rw-r--r--  src/mongo/db/storage/snapshot.h  2
-rw-r--r--  src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp  96
-rw-r--r--  src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp  45
-rw-r--r--  src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp  23
-rw-r--r--  src/mongo/db/storage/storage_engine.h  4
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp  27
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp  21
-rw-r--r--  src/mongo/db/storage/storage_engine_interface.h  2
-rw-r--r--  src/mongo/db/storage/storage_engine_lock_file_posix.cpp  34
-rw-r--r--  src/mongo/db/storage/storage_engine_lock_file_test.cpp  2
-rw-r--r--  src/mongo/db/storage/storage_engine_lock_file_windows.cpp  12
-rw-r--r--  src/mongo/db/storage/storage_engine_metadata.cpp  66
-rw-r--r--  src/mongo/db/storage/storage_engine_metadata_test.cpp  2
-rw-r--r--  src/mongo/db/storage/storage_file_util.cpp  12
-rw-r--r--  src/mongo/db/storage/storage_init.cpp  13
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp  37
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp  12
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp  3
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp  10
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp  8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp  14
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp  31
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp  8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp  38
-rw-r--r--  src/mongo/db/system_index.cpp  16
-rw-r--r--  src/mongo/db/traffic_reader.cpp  4
-rw-r--r--  src/mongo/db/traffic_recorder.cpp  2
-rw-r--r--  src/mongo/db/traffic_recorder_validators.cpp  4
-rw-r--r--  src/mongo/db/transaction_history_iterator.cpp  3
-rw-r--r--  src/mongo/db/transaction_participant.cpp  104
-rw-r--r--  src/mongo/db/transaction_participant_test.cpp  187
-rw-r--r--  src/mongo/db/update/addtoset_node.cpp  3
-rw-r--r--  src/mongo/db/update/addtoset_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/arithmetic_node.cpp  16
-rw-r--r--  src/mongo/db/update/arithmetic_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/bit_node.cpp  15
-rw-r--r--  src/mongo/db/update/bit_node.h  2
-rw-r--r--  src/mongo/db/update/bit_node_test.cpp  4
-rw-r--r--  src/mongo/db/update/compare_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/current_date_node_test.cpp  4
-rw-r--r--  src/mongo/db/update/field_checker_test.cpp  4
-rw-r--r--  src/mongo/db/update/log_builder.cpp  16
-rw-r--r--  src/mongo/db/update/modifier_node.cpp  18
-rw-r--r--  src/mongo/db/update/object_replace_executor.cpp  3
-rw-r--r--  src/mongo/db/update/object_replace_executor_test.cpp  2
-rw-r--r--  src/mongo/db/update/path_support.cpp  18
-rw-r--r--  src/mongo/db/update/path_support_test.cpp  20
-rw-r--r--  src/mongo/db/update/pipeline_executor_test.cpp  2
-rw-r--r--  src/mongo/db/update/pop_node.cpp  3
-rw-r--r--  src/mongo/db/update/pull_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/pullall_node.cpp  2
-rw-r--r--  src/mongo/db/update/pullall_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/push_node.cpp  6
-rw-r--r--  src/mongo/db/update/push_node_test.cpp  15
-rw-r--r--  src/mongo/db/update/rename_node.cpp  16
-rw-r--r--  src/mongo/db/update/rename_node_test.cpp  5
-rw-r--r--  src/mongo/db/update/set_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/storage_validation.cpp  3
-rw-r--r--  src/mongo/db/update/unset_node_test.cpp  2
-rw-r--r--  src/mongo/db/update/update_array_node.h  2
-rw-r--r--  src/mongo/db/update/update_driver.cpp  16
-rw-r--r--  src/mongo/db/update/update_leaf_node.cpp  10
-rw-r--r--  src/mongo/db/update/update_object_node.cpp  33
-rw-r--r--  src/mongo/db/update/update_object_node.h  2
-rw-r--r--  src/mongo/db/update/update_serialization_test.cpp  2
-rw-r--r--  src/mongo/db/update_index_data.cpp  2
-rw-r--r--  src/mongo/db/update_index_data.h  2
-rw-r--r--  src/mongo/db/update_index_data_test.cpp  2
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp  4
-rw-r--r--  src/mongo/db/views/resolved_view_test.cpp  27
-rw-r--r--  src/mongo/db/views/view_catalog.cpp  3
-rw-r--r--  src/mongo/db/views/view_catalog_test.cpp  3
-rw-r--r--  src/mongo/db/views/view_graph.cpp  6
-rw-r--r--  src/mongo/db/write_concern.cpp  2
-rw-r--r--  src/mongo/dbtests/basictests.cpp  4
-rw-r--r--  src/mongo/dbtests/clienttests.cpp  4
-rw-r--r--  src/mongo/dbtests/commandtests.cpp  10
-rw-r--r--  src/mongo/dbtests/counttests.cpp  9
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp  2
-rw-r--r--  src/mongo/dbtests/deferred_writer.cpp  5
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp  5
-rw-r--r--  src/mongo/dbtests/framework.h  2
-rw-r--r--  src/mongo/dbtests/framework_options.cpp  2
-rw-r--r--  src/mongo/dbtests/framework_options.h  2
-rw-r--r--  src/mongo/dbtests/framework_options_init.cpp  2
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp  6
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  168
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp  85
-rw-r--r--  src/mongo/dbtests/jsontests.cpp  3
-rw-r--r--  src/mongo/dbtests/jstests.cpp  20
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.cpp  2
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.h  2
-rw-r--r--  src/mongo/dbtests/mock/mock_remote_db_server.cpp  2
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.cpp  2
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.h  2
-rw-r--r--  src/mongo/dbtests/mock_dbclient_conn_test.cpp  12
-rw-r--r--  src/mongo/dbtests/mock_replica_set_test.cpp  2
-rw-r--r--  src/mongo/dbtests/multikey_paths_test.cpp  45
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp  6
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  5
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp  3
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  4
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp  3
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp  2
-rw-r--r--  src/mongo/dbtests/querytests.cpp  38
-rw-r--r--  src/mongo/dbtests/replica_set_monitor_test.cpp  24
-rw-r--r--  src/mongo/dbtests/repltests.cpp  2
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp  4
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp  500
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp  2
-rw-r--r--  src/mongo/dbtests/updatetests.cpp  19
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp  195
-rw-r--r--  src/mongo/embedded/embedded.cpp  4
-rw-r--r--  src/mongo/embedded/embedded_ismaster.cpp  2
-rw-r--r--  src/mongo/embedded/embedded_options_helpers.cpp  2
-rw-r--r--  src/mongo/embedded/stitch_support/stitch_support_test.cpp  4
-rw-r--r--  src/mongo/executor/connection_pool.cpp  21
-rw-r--r--  src/mongo/executor/connection_pool_test.cpp  4
-rw-r--r--  src/mongo/executor/connection_pool_test_fixture.cpp  8
-rw-r--r--  src/mongo/executor/connection_pool_tl.cpp  12
-rw-r--r--  src/mongo/executor/connection_pool_tl.h  2
-rw-r--r--  src/mongo/executor/egress_tag_closer.h  7
-rw-r--r--  src/mongo/executor/network_interface_integration_test.cpp  23
-rw-r--r--  src/mongo/executor/network_interface_mock.cpp  52
-rw-r--r--  src/mongo/executor/network_interface_tl.cpp  55
-rw-r--r--  src/mongo/executor/scoped_task_executor.cpp  4
-rw-r--r--  src/mongo/executor/task_executor.h  1
-rw-r--r--  src/mongo/executor/task_executor_cursor_integration_test.cpp  3
-rw-r--r--  src/mongo/executor/task_executor_cursor_test.cpp  48
-rw-r--r--  src/mongo/executor/task_executor_test_common.cpp  7
-rw-r--r--  src/mongo/executor/task_executor_test_common.h  7
-rw-r--r--  src/mongo/executor/thread_pool_task_executor.cpp  2
-rw-r--r--  src/mongo/idl/config_option_test.cpp  22
-rw-r--r--  src/mongo/idl/idl_parser.cpp  20
-rw-r--r--  src/mongo/idl/idl_parser.h  9
-rw-r--r--  src/mongo/idl/idl_test.cpp  329
-rw-r--r--  src/mongo/idl/server_parameter_specialized_test.cpp  12
-rw-r--r--  src/mongo/idl/server_parameter_with_storage.h  11
-rw-r--r--  src/mongo/logger/encoder.h  2
-rw-r--r--  src/mongo/logger/log_component.cpp  4
-rw-r--r--  src/mongo/logger/log_component_settings.cpp  4
-rw-r--r--  src/mongo/logger/log_manager.cpp  4
-rw-r--r--  src/mongo/logger/log_severity.cpp  6
-rw-r--r--  src/mongo/logger/log_test.cpp  3
-rw-r--r--  src/mongo/logger/parse_log_component_settings.cpp  26
-rw-r--r--  src/mongo/logger/parse_log_component_settings_test.cpp  7
-rw-r--r--  src/mongo/logger/ramlog.cpp  2
-rw-r--r--  src/mongo/logger/ramlog.h  2
-rw-r--r--  src/mongo/logger/rotatable_file_writer.cpp  18
-rw-r--r--  src/mongo/logger/rotatable_file_writer_test.cpp  2
-rw-r--r--  src/mongo/logv2/log_component.cpp  4
-rw-r--r--  src/mongo/logv2/log_component_settings.cpp  4
-rw-r--r--  src/mongo/logv2/log_domain_global.cpp  4
-rw-r--r--  src/mongo/logv2/log_manager.cpp  4
-rw-r--r--  src/mongo/logv2/log_manager.h  12
-rw-r--r--  src/mongo/logv2/log_severity.cpp  6
-rw-r--r--  src/mongo/logv2/log_source.h  13
-rw-r--r--  src/mongo/logv2/logv2_bm.cpp  2
-rw-r--r--  src/mongo/platform/atomic_proxy.h  8
-rw-r--r--  src/mongo/platform/bits.h  2
-rw-r--r--  src/mongo/platform/bits_test.cpp  2
-rw-r--r--  src/mongo/platform/condition_variable_test.cpp  2
-rw-r--r--  src/mongo/platform/decimal128_test.cpp  6
-rw-r--r--  src/mongo/platform/mutex_test.cpp  2
-rw-r--r--  src/mongo/platform/random_test.cpp  2
-rw-r--r--  src/mongo/platform/shared_library_posix.cpp  3
-rw-r--r--  src/mongo/platform/strcasestr.h  2
-rw-r--r--  src/mongo/rpc/get_status_from_command_result.cpp  8
-rw-r--r--  src/mongo/rpc/legacy_reply.cpp  9
-rw-r--r--  src/mongo/rpc/legacy_request.cpp  4
-rw-r--r--  src/mongo/rpc/metadata.cpp  2
-rw-r--r--  src/mongo/rpc/metadata/client_metadata.cpp  39
-rw-r--r--  src/mongo/rpc/metadata/client_metadata_test.cpp  91
-rw-r--r--  src/mongo/rpc/metadata/config_server_metadata.cpp  4
-rw-r--r--  src/mongo/rpc/metadata/logical_time_metadata_test.cpp  2
-rw-r--r--  src/mongo/rpc/metadata/oplog_query_metadata_test.cpp  19
-rw-r--r--  src/mongo/rpc/metadata/repl_set_metadata_test.cpp  14
-rw-r--r--  src/mongo/rpc/metadata/sharding_metadata_test.cpp  10
-rw-r--r--  src/mongo/rpc/metadata/tracking_metadata.cpp  4
-rw-r--r--  src/mongo/rpc/metadata/tracking_metadata_test.cpp  9
-rw-r--r--  src/mongo/rpc/metadata_test.cpp  11
-rw-r--r--  src/mongo/rpc/object_check_test.cpp  2
-rw-r--r--  src/mongo/rpc/op_msg_integration_test.cpp  3
-rw-r--r--  src/mongo/rpc/op_msg_test.cpp  219
-rw-r--r--  src/mongo/rpc/protocol.cpp  13
-rw-r--r--  src/mongo/rpc/protocol.h  4
-rw-r--r--  src/mongo/rpc/protocol_test.cpp  9
-rw-r--r--  src/mongo/rpc/write_concern_error_detail.cpp  4
-rw-r--r--  src/mongo/s/async_requests_sender.cpp  7
-rw-r--r--  src/mongo/s/balancer_configuration_test.cpp  3
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl.cpp  27
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.cpp  37
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.h  2
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.cpp  8
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.cpp  2
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.h  2
-rw-r--r--  src/mongo/s/catalog/mongo_version_range.cpp  2
-rw-r--r--  src/mongo/s/catalog/mongo_version_range.h  2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp  36
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp  74
-rw-r--r--  src/mongo/s/catalog/type_changelog_test.cpp  51
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp  15
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp  103
-rw-r--r--  src/mongo/s/catalog/type_collection_test.cpp  115
-rw-r--r--  src/mongo/s/catalog/type_config_version_test.cpp  8
-rw-r--r--  src/mongo/s/catalog/type_database.cpp  8
-rw-r--r--  src/mongo/s/catalog/type_database_test.cpp  3
-rw-r--r--  src/mongo/s/catalog/type_locks_test.cpp  94
-rw-r--r--  src/mongo/s/catalog/type_mongos_test.cpp  80
-rw-r--r--  src/mongo/s/catalog/type_shard_collection.h  26
-rw-r--r--  src/mongo/s/catalog/type_shard_database.cpp  8
-rw-r--r--  src/mongo/s/catalog/type_shard_test.cpp  11
-rw-r--r--  src/mongo/s/catalog/type_tags_test.cpp  14
-rw-r--r--  src/mongo/s/catalog_cache.cpp  22
-rw-r--r--  src/mongo/s/chunk.cpp  3
-rw-r--r--  src/mongo/s/chunk_manager.cpp  6
-rw-r--r--  src/mongo/s/chunk_manager_index_bounds_test.cpp  3
-rw-r--r--  src/mongo/s/client/parallel.cpp  31
-rw-r--r--  src/mongo/s/client/shard.h  20
-rw-r--r--  src/mongo/s/client/shard_registry.cpp  4
-rw-r--r--  src/mongo/s/client/shard_remote.cpp  1
-rw-r--r--  src/mongo/s/client/shard_remote.h  8
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp  4
-rw-r--r--  src/mongo/s/client/version_manager.cpp  37
-rw-r--r--  src/mongo/s/cluster_commands_helpers.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_data_size_cmd.cpp  7
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp  19
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp  3
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_find_test.cpp  3
-rw-r--r--  src/mongo/s/commands/cluster_kill_op.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce.cpp  21
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp  8
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp  15
-rw-r--r--  src/mongo/s/commands/cluster_split_cmd.cpp  20
-rw-r--r--  src/mongo/s/commands/commands_public.cpp  4
-rw-r--r--  src/mongo/s/commands/strategy.cpp  12
-rw-r--r--  src/mongo/s/grid.cpp  5
-rw-r--r--  src/mongo/s/mongos_options.h  2
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp  6
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp  67
-rw-r--r--  src/mongo/s/query/blocking_results_merger_test.cpp  1
-rw-r--r--  src/mongo/s/query/cluster_aggregate.cpp  6
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp  3
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_params.h  2
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp  11
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h  2
-rw-r--r--  src/mongo/s/query/cluster_find.cpp  41
-rw-r--r--  src/mongo/s/query/router_stage_pipeline.cpp  3
-rw-r--r--  src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp  5
-rw-r--r--  src/mongo/s/query/store_possible_cursor.h  2
-rw-r--r--  src/mongo/s/request_types/add_shard_request_test.cpp  51
-rw-r--r--  src/mongo/s/request_types/add_shard_to_zone_request_test.cpp  15
-rw-r--r--  src/mongo/s/request_types/balance_chunk_request_test.cpp  42
-rw-r--r--  src/mongo/s/request_types/merge_chunk_request_test.cpp  116
-rw-r--r--  src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp  5
-rw-r--r--  src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp  15
-rw-r--r--  src/mongo/s/request_types/set_shard_version_request_test.cpp  256
-rw-r--r--  src/mongo/s/request_types/split_chunk_request_test.cpp  257
-rw-r--r--  src/mongo/s/request_types/split_chunk_request_type.cpp  4
-rw-r--r--  src/mongo/s/request_types/update_zone_key_range_request_type.cpp  5
-rw-r--r--  src/mongo/s/server.cpp  8
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp  8
-rw-r--r--  src/mongo/s/shard_key_pattern_test.cpp  12
-rw-r--r--  src/mongo/s/shard_util.cpp  14
-rw-r--r--  src/mongo/s/sharding_egress_metadata_hook.cpp  4
-rw-r--r--  src/mongo/s/sharding_initialization.h  2
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.cpp  11
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp  12
-rw-r--r--  src/mongo/s/sharding_task_executor.cpp  9
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.cpp  2
-rw-r--r--  src/mongo/s/transaction_router.cpp  45
-rw-r--r--  src/mongo/s/transaction_router.h  271
-rw-r--r--  src/mongo/s/transaction_router_test.cpp  179
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert.cpp  11
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert_test.cpp  13
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp  13
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp  22
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp  20
-rw-r--r--  src/mongo/s/write_ops/batched_command_response.cpp  6
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp  14
-rw-r--r--  src/mongo/s/write_ops/chunk_manager_targeter.cpp  24
-rw-r--r--  src/mongo/scripting/bson_template_evaluator.h  2
-rw-r--r--  src/mongo/scripting/bson_template_evaluator_test.cpp  50
-rw-r--r--  src/mongo/scripting/engine.cpp  8
-rw-r--r--  src/mongo/scripting/engine.h  2
-rw-r--r--  src/mongo/scripting/engine_none.cpp  2
-rw-r--r--  src/mongo/scripting/mozjs/bson.cpp  4
-rw-r--r--  src/mongo/scripting/mozjs/code.cpp  9
-rw-r--r--  src/mongo/scripting/mozjs/cursor_handle.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/implscope.cpp  2
-rw-r--r--  src/mongo/scripting/mozjs/mongo.cpp  9
-rw-r--r--  src/mongo/scripting/mozjs/mongohelpers.js  3
-rw-r--r--  src/mongo/scripting/mozjs/nativefunction.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/object.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/objectwrapper.cpp  7
-rw-r--r--  src/mongo/scripting/mozjs/regexp.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/session.cpp  4
-rw-r--r--  src/mongo/scripting/mozjs/timestamp.cpp  7
-rw-r--r--  src/mongo/scripting/mozjs/uri.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/valuewriter.cpp  3
-rw-r--r--  src/mongo/scripting/mozjs/wrapconstrainedmethod.h  13
-rw-r--r--  src/mongo/scripting/mozjs/wraptype.h  30
-rw-r--r--  src/mongo/shell/assert.js  22
-rw-r--r--  src/mongo/shell/bench.cpp  27
-rw-r--r--  src/mongo/shell/bench.h  8
-rw-r--r--  src/mongo/shell/bulk_api.js  26
-rw-r--r--  src/mongo/shell/collection.js  178
-rw-r--r--  src/mongo/shell/crud_api.js  296
-rw-r--r--  src/mongo/shell/db.js  3218
-rw-r--r--  src/mongo/shell/dbshell.cpp  12
-rw-r--r--  src/mongo/shell/encrypted_dbclient_base.cpp  4
-rw-r--r--  src/mongo/shell/encrypted_shell_options.h  2
-rw-r--r--  src/mongo/shell/explain_query.js  5
-rw-r--r--  src/mongo/shell/explainable.js  7
-rw-r--r--  src/mongo/shell/kms_aws.cpp  2
-rw-r--r--  src/mongo/shell/kms_local.cpp  2
-rw-r--r--  src/mongo/shell/linenoise.cpp  12
-rw-r--r--  src/mongo/shell/linenoise_utf8.h  5
-rw-r--r--  src/mongo/shell/mk_wcwidth.cpp  18
-rw-r--r--  src/mongo/shell/mongo.js  17
-rw-r--r--  src/mongo/shell/query.js  84
-rw-r--r--  src/mongo/shell/replsettest.js  30
-rw-r--r--  src/mongo/shell/servers.js  2343
-rw-r--r--  src/mongo/shell/servers_misc.js  48
-rw-r--r--  src/mongo/shell/session.js  110
-rw-r--r--  src/mongo/shell/shardingtest.js  18
-rw-r--r--  src/mongo/shell/shell_options.cpp  11
-rw-r--r--  src/mongo/shell/shell_options.h  2
-rw-r--r--  src/mongo/shell/shell_options_init.cpp  2
-rw-r--r--  src/mongo/shell/shell_utils.cpp  7
-rw-r--r--  src/mongo/shell/shell_utils.h  4
-rw-r--r--  src/mongo/shell/shell_utils_extended.cpp  8
-rw-r--r--  src/mongo/shell/shell_utils_extended.h  2
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp  10
-rw-r--r--  src/mongo/shell/types.js  35
-rw-r--r--  src/mongo/shell/utils.js  82
-rw-r--r--  src/mongo/shell/utils_auth.js  242
-rw-r--r--  src/mongo/shell/utils_sh.js  64
-rw-r--r--  src/mongo/stdx/mutex.h  2
-rw-r--r--  src/mongo/stdx/thread.h  8
-rw-r--r--  src/mongo/stdx/variant.h  12
-rw-r--r--  src/mongo/tools/mongobridge_options.h  2
-rw-r--r--  src/mongo/tools/mongobridge_options_init.cpp  2
-rw-r--r--  src/mongo/tools/mongoebench_options.cpp  3
-rw-r--r--  src/mongo/transport/baton_asio_linux.h  10
-rw-r--r--  src/mongo/transport/max_conns_override_test.cpp  2
-rw-r--r--  src/mongo/transport/message_compressor_manager_test.cpp  5
-rw-r--r--  src/mongo/transport/message_compressor_registry.h  2
-rw-r--r--  src/mongo/transport/service_entry_point.h  8
-rw-r--r--  src/mongo/transport/service_entry_point_impl.cpp  6
-rw-r--r--  src/mongo/transport/service_executor_adaptive.cpp  47
-rw-r--r--  src/mongo/transport/service_executor_adaptive_test.cpp  15
-rw-r--r--  src/mongo/transport/service_executor_synchronous.cpp  2
-rw-r--r--  src/mongo/transport/service_executor_test.cpp  2
-rw-r--r--  src/mongo/transport/service_state_machine.cpp  2
-rw-r--r--  src/mongo/transport/service_state_machine_test.cpp  15
-rw-r--r--  src/mongo/transport/session.h  14
-rw-r--r--  src/mongo/transport/session_asio.h  5
-rw-r--r--  src/mongo/transport/transport_layer_asio.cpp  10
-rw-r--r--  src/mongo/transport/transport_layer_asio_integration_test.cpp  7
-rw-r--r--  src/mongo/transport/transport_layer_asio_test.cpp  4
-rw-r--r--  src/mongo/transport/transport_layer_egress_init.cpp  1
-rw-r--r--src/mongo/unittest/system_resource_canary_bm.cpp4
-rw-r--r--src/mongo/unittest/temp_dir.cpp2
-rw-r--r--src/mongo/unittest/unittest_helpers.cpp2
-rw-r--r--src/mongo/util/alarm.h2
-rw-r--r--src/mongo/util/alarm_test.cpp12
-rw-r--r--src/mongo/util/assert_util.cpp11
-rw-r--r--src/mongo/util/assert_util_test.cpp16
-rw-r--r--src/mongo/util/boost_assert_impl.cpp7
-rw-r--r--src/mongo/util/bson_util.h2
-rw-r--r--src/mongo/util/bufreader.h2
-rw-r--r--src/mongo/util/checksum.h2
-rw-r--r--src/mongo/util/clock_source_mock_test.cpp21
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline.cpp2
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline.h2
-rw-r--r--src/mongo/util/concurrency/idle_thread_block.cpp4
-rw-r--r--src/mongo/util/concurrency/mutex.h2
-rw-r--r--src/mongo/util/concurrency/thread_name.cpp4
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp4
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp5
-rw-r--r--src/mongo/util/concurrency/value.h2
-rw-r--r--src/mongo/util/debugger.cpp2
-rw-r--r--src/mongo/util/decimal_counter.h2
-rw-r--r--src/mongo/util/diagnostic_info.cpp2
-rw-r--r--src/mongo/util/diagnostic_info.h2
-rw-r--r--src/mongo/util/diagnostic_info_test.cpp2
-rw-r--r--src/mongo/util/dns_name.h4
-rw-r--r--src/mongo/util/dns_query_test.cpp9
-rw-r--r--src/mongo/util/exception_filter_win32.cpp4
-rw-r--r--src/mongo/util/exit.cpp5
-rw-r--r--src/mongo/util/fail_point.cpp2
-rw-r--r--src/mongo/util/fail_point_test.cpp7
-rw-r--r--src/mongo/util/file.cpp21
-rw-r--r--src/mongo/util/file.h2
-rw-r--r--src/mongo/util/future.h2
-rw-r--r--src/mongo/util/future_impl.h11
-rw-r--r--src/mongo/util/future_test_edge_cases.cpp2
-rw-r--r--src/mongo/util/future_test_executor_future.cpp52
-rw-r--r--src/mongo/util/future_test_future_int.cpp22
-rw-r--r--src/mongo/util/future_test_future_move_only.cpp10
-rw-r--r--src/mongo/util/future_test_future_void.cpp22
-rw-r--r--src/mongo/util/future_test_shared_future.cpp5
-rw-r--r--src/mongo/util/future_test_utils.h5
-rw-r--r--src/mongo/util/hex.cpp2
-rw-r--r--src/mongo/util/hex.h2
-rw-r--r--src/mongo/util/if_constexpr.h4
-rw-r--r--src/mongo/util/intrusive_counter.cpp3
-rw-r--r--src/mongo/util/log.h58
-rw-r--r--src/mongo/util/log_and_backoff.cpp4
-rw-r--r--src/mongo/util/lru_cache_test.cpp5
-rw-r--r--src/mongo/util/map_util.h2
-rw-r--r--src/mongo/util/md5_test.cpp2
-rw-r--r--src/mongo/util/md5main.cpp4
-rw-r--r--src/mongo/util/net/cidr.cpp4
-rw-r--r--src/mongo/util/net/hostandport.cpp25
-rw-r--r--src/mongo/util/net/http_client_none.cpp2
-rw-r--r--src/mongo/util/net/http_client_winhttp.cpp6
-rw-r--r--src/mongo/util/net/private/socket_poll.cpp2
-rw-r--r--src/mongo/util/net/private/socket_poll.h2
-rw-r--r--src/mongo/util/net/sock.cpp6
-rw-r--r--src/mongo/util/net/ssl/context_schannel.hpp44
-rw-r--r--src/mongo/util/net/ssl/detail/impl/engine_apple.ipp8
-rw-r--r--src/mongo/util/net/ssl/detail/io.hpp2
-rw-r--r--src/mongo/util/net/ssl_manager.cpp23
-rw-r--r--src/mongo/util/net/ssl_manager.h6
-rw-r--r--src/mongo/util/net/ssl_manager_apple.cpp11
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp30
-rw-r--r--src/mongo/util/net/ssl_manager_test.cpp14
-rw-r--r--src/mongo/util/net/ssl_manager_windows.cpp74
-rw-r--r--src/mongo/util/net/ssl_options.cpp13
-rw-r--r--src/mongo/util/net/ssl_options.h24
-rw-r--r--src/mongo/util/net/ssl_parameters.cpp9
-rw-r--r--src/mongo/util/net/ssl_parameters_auth.cpp16
-rw-r--r--src/mongo/util/net/ssl_stream.cpp6
-rw-r--r--src/mongo/util/ntservice.cpp2
-rw-r--r--src/mongo/util/options_parser/constraints.h7
-rw-r--r--src/mongo/util/options_parser/environment_test.cpp9
-rw-r--r--src/mongo/util/options_parser/option_section.cpp6
-rw-r--r--src/mongo/util/options_parser/options_parser.cpp27
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp3
-rw-r--r--src/mongo/util/perfctr_collect.cpp4
-rw-r--r--src/mongo/util/perfctr_collect_test.cpp20
-rw-r--r--src/mongo/util/periodic_runner.h2
-rw-r--r--src/mongo/util/periodic_runner_factory.cpp2
-rw-r--r--src/mongo/util/periodic_runner_impl.cpp4
-rw-r--r--src/mongo/util/polymorphic_scoped.h4
-rw-r--r--src/mongo/util/processinfo.h2
-rw-r--r--src/mongo/util/processinfo_linux.cpp38
-rw-r--r--src/mongo/util/processinfo_openbsd.cpp2
-rw-r--r--src/mongo/util/processinfo_osx.cpp2
-rw-r--r--src/mongo/util/processinfo_solaris.cpp2
-rw-r--r--src/mongo/util/processinfo_test.cpp4
-rw-r--r--src/mongo/util/processinfo_unknown.cpp2
-rw-r--r--src/mongo/util/processinfo_windows.cpp2
-rw-r--r--src/mongo/util/procparser.cpp17
-rw-r--r--src/mongo/util/procparser.h12
-rw-r--r--src/mongo/util/procparser_test.cpp14
-rw-r--r--src/mongo/util/producer_consumer_queue.h9
-rw-r--r--src/mongo/util/producer_consumer_queue_test.cpp4
-rw-r--r--src/mongo/util/progress_meter.cpp2
-rw-r--r--src/mongo/util/progress_meter.h2
-rw-r--r--src/mongo/util/queue.h2
-rw-r--r--src/mongo/util/regex_util.cpp10
-rw-r--r--src/mongo/util/regex_util.h4
-rw-r--r--src/mongo/util/safe_num.h2
-rw-r--r--src/mongo/util/safe_num_test.cpp2
-rw-r--r--src/mongo/util/scopeguard.h2
-rw-r--r--src/mongo/util/shared_buffer.h2
-rw-r--r--src/mongo/util/signal_handlers.cpp2
-rw-r--r--src/mongo/util/signal_win32.cpp2
-rw-r--r--src/mongo/util/signal_win32.h2
-rw-r--r--src/mongo/util/stack_introspect.h2
-rw-r--r--src/mongo/util/stacktrace_posix.cpp4
-rw-r--r--src/mongo/util/stacktrace_unwind.cpp4
-rw-r--r--src/mongo/util/stacktrace_windows.cpp2
-rw-r--r--src/mongo/util/string_map_test.cpp7
-rw-r--r--src/mongo/util/summation_test.cpp60
-rw-r--r--src/mongo/util/tcmalloc_set_parameter.cpp12
-rw-r--r--src/mongo/util/text.cpp8
-rw-r--r--src/mongo/util/tick_source_test.cpp2
-rw-r--r--src/mongo/util/unique_function_test.cpp2
-rw-r--r--src/mongo/util/unowned_ptr_test.cpp2
-rw-r--r--src/mongo/watchdog/watchdog_mongod.h4
1232 files changed, 14032 insertions, 21394 deletions
diff --git a/src/mongo/base/clonable_ptr.h b/src/mongo/base/clonable_ptr.h
index 42b0c63c1db..6f6f890e2de 100644
--- a/src/mongo/base/clonable_ptr.h
+++ b/src/mongo/base/clonable_ptr.h
@@ -264,8 +264,9 @@ public:
* NOTE: This constructor is disabled for types with a stateless `CloneFactory` type.
*/
template <typename CloneFactory_ = CloneFactory>
- inline clonable_ptr(typename std::enable_if<!std::is_empty<CloneFactory_>::value,
- std::nullptr_t>::type) = delete;
+ inline clonable_ptr(
+ typename std::enable_if<!std::is_empty<CloneFactory_>::value, std::nullptr_t>::type) =
+ delete;
/*!
* Constructs a pointer to nothing, with a default `CloneFactory`.
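The SFINAE-deleted constructor reflowed above is easy to misread; here is the same trick reduced to a standalone sketch, with a hypothetical `owner`/`StatefulFactory` pair standing in for `clonable_ptr` and its `CloneFactory`:

    #include <cstddef>
    #include <type_traits>

    struct StatefulFactory { int state; };  // non-empty: carries real state

    template <typename Factory>
    struct owner {
        owner() = default;

        // Participates in overload resolution only when Factory is non-empty,
        // and is then deleted: `owner<StatefulFactory> o{nullptr};` fails to
        // compile, because there is no factory state to pair with the pointer.
        template <typename F = Factory>
        owner(typename std::enable_if<!std::is_empty<F>::value, std::nullptr_t>::type) = delete;
    };

    int main() {
        owner<StatefulFactory> ok;               // fine: default construction
        // owner<StatefulFactory> bad{nullptr};  // error: deleted constructor
        (void)ok;
    }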
diff --git a/src/mongo/base/concept/assignable.h b/src/mongo/base/concept/assignable.h
index 15fcc555b01..0c3e2d68e59 100644
--- a/src/mongo/base/concept/assignable.h
+++ b/src/mongo/base/concept/assignable.h
@@ -33,9 +33,9 @@
namespace mongo {
namespace concept {
-/*!
- * The Assignable concept models a type which can be copy assigned and copy constructed.
- */
-struct Assignable : CopyConstructible, CopyAssignable {};
+ /*!
+ * The Assignable concept models a type which can be copy assigned and copy constructed.
+ */
+ struct Assignable : CopyConstructible, CopyAssignable {};
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/clonable.h b/src/mongo/base/concept/clonable.h
index 63cdceec353..d658b0e5442 100644
--- a/src/mongo/base/concept/clonable.h
+++ b/src/mongo/base/concept/clonable.h
@@ -33,16 +33,16 @@
namespace mongo {
namespace concept {
-/*!
- * Objects conforming to the Clonable concept can be dynamically copied, using `this->clone()`.
- * The Clonable concept does not specify the return type of the `clone()` function.
- */
-struct Clonable {
- /*! Clonable objects must be safe to destroy, by pointer. */
- virtual ~Clonable() noexcept = 0;
+ /*!
+ * Objects conforming to the Clonable concept can be dynamically copied, using `this->clone()`.
+ * The Clonable concept does not specify the return type of the `clone()` function.
+ */
+ struct Clonable {
+ /*! Clonable objects must be safe to destroy, by pointer. */
+ virtual ~Clonable() noexcept = 0;
- /*! Clonable objects can be cloned without knowing the actual dynamic type. */
- Constructible<UniquePtr<Clonable>> clone() const;
-};
+ /*! Clonable objects can be cloned without knowing the actual dynamic type. */
+ Constructible<UniquePtr<Clonable>> clone() const;
+ };
} // namespace concept
} // namespace mongo
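A minimal model of the Clonable concept described above, assuming only <memory>; `Shape` and `Circle` are hypothetical names:

    #include <memory>

    struct Shape {
        // Clonable objects must be safe to destroy by pointer...
        virtual ~Shape() noexcept = default;
        // ...and copyable without knowing the actual dynamic type.
        virtual std::unique_ptr<Shape> clone() const = 0;
    };

    struct Circle : Shape {
        double radius = 1.0;
        std::unique_ptr<Shape> clone() const override {
            return std::make_unique<Circle>(*this);
        }
    };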
diff --git a/src/mongo/base/concept/clone_factory.h b/src/mongo/base/concept/clone_factory.h
index 5db13c17de9..d263311b79d 100644
--- a/src/mongo/base/concept/clone_factory.h
+++ b/src/mongo/base/concept/clone_factory.h
@@ -34,16 +34,16 @@
namespace mongo {
namespace concept {
-/*!
- * Objects conforming to the `CloneFactory` concept are function-like constructs which return
- * objects that are dynamically allocated copies of their inputs.
- * These copies can be made without knowing the actual dynamic type. The `CloneFactory` type itself
- * must be `Assignable`, in that it can be used with automatically generated copy constructors and
- * copy assignment operators.
- */
-template <typename T>
-struct CloneFactory : Assignable {
- Constructible<UniquePtr<T>> operator()(const T*) const;
-};
+ /*!
+ * Objects conforming to the `CloneFactory` concept are function-like constructs which return
+ * objects that are dynamically allocated copies of their inputs.
+ * These copies can be made without knowing the actual dynamic type. The `CloneFactory` type
+ * itself must be `Assignable`, in that it can be used with automatically generated copy
+ * constructors and copy assignment operators.
+ */
+ template <typename T>
+ struct CloneFactory : Assignable {
+ Constructible<UniquePtr<T>> operator()(const T*) const;
+ };
} // namespace concept
} // namespace mongo
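The shape of a conforming factory, in miniature (hypothetical `IntCloneFactory`, standard library types only):

    #include <memory>

    // Function-like, copy-constructible and copy-assignable (Assignable),
    // returning a dynamically allocated copy of its input.
    struct IntCloneFactory {
        std::unique_ptr<int> operator()(const int* p) const {
            return std::make_unique<int>(*p);
        }
    };

    int main() {
        IntCloneFactory f;
        IntCloneFactory g = f;  // Assignable: copies are fine
        int x = 42;
        auto copy = g(&x);      // *copy == 42, independently owned
        return *copy == 42 ? 0 : 1;
    }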
diff --git a/src/mongo/base/concept/constructible.h b/src/mongo/base/concept/constructible.h
index b0f6d81adc5..f725c952d36 100644
--- a/src/mongo/base/concept/constructible.h
+++ b/src/mongo/base/concept/constructible.h
@@ -35,32 +35,31 @@
namespace mongo {
namespace concept {
-/**
- * The Constructable trait indicates whether `T` is constructible from `Constructible`.
- *
- * RETURNS: true if `T{ std::declval< Constructible >() }` is a valid expression and false
- * otherwise.
- */
-template <typename T, typename Constructible, typename = void>
-struct is_constructible : std::false_type {};
+ /**
+ * The Constructable trait indicates whether `T` is constructible from `Constructible`.
+ *
+ * RETURNS: true if `T{ std::declval< Constructible >() }` is a valid expression and false
+ * otherwise.
+ */
+ template <typename T, typename Constructible, typename = void>
+ struct is_constructible : std::false_type {};
-template <typename T, typename Constructible>
-struct is_constructible<T,
- Constructible,
- stdx::void_t<decltype(T{std::declval<Constructible<T>>()})>>
- : std::true_type {};
+ template <typename T, typename Constructible>
+ struct is_constructible<T,
+ Constructible,
+ stdx::void_t<decltype(T{std::declval<Constructible<T>>()})>>
+ : std::true_type {};
-/**
- * The Constructable concept models a type which can be passed to a single-argument constructor of
- * `T`.
- * This is not possible to describe in the type `Constructible`.
- *
- * The expression: `T{ std::declval< Constructible< T > >() }` should be valid.
- *
- * This concept is more broadly applicable than `ConvertibleTo`. `ConvertibleTo` uses implicit
- * conversion, whereas `Constructible` uses direct construction.
- */
-template <typename T>
-struct Constructible {};
+ /**
+ * The Constructable concept models a type which can be passed to a single-argument constructor
+ * of `T`. This is not possible to describe in the type `Constructible`.
+ *
+ * The expression: `T{ std::declval< Constructible< T > >() }` should be valid.
+ *
+ * This concept is more broadly applicable than `ConvertibleTo`. `ConvertibleTo` uses implicit
+ * conversion, whereas `Constructible` uses direct construction.
+ */
+ template <typename T>
+ struct Constructible {};
} // namespace concept
} // namespace mongo
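The detection idiom used in `is_constructible` above, reduced to a standalone sketch (with std::void_t in place of stdx::void_t; `has_single_arg_ctor` is a hypothetical name):

    #include <type_traits>
    #include <utility>

    template <typename T, typename A, typename = void>
    struct has_single_arg_ctor : std::false_type {};

    // Well-formedness of T{std::declval<A>()} selects this specialization.
    template <typename T, typename A>
    struct has_single_arg_ctor<T, A, std::void_t<decltype(T{std::declval<A>()})>>
        : std::true_type {};

    struct FromInt {
        explicit FromInt(int);
    };

    static_assert(has_single_arg_ctor<FromInt, int>::value,
                  "direct construction from int is detected");
    static_assert(!has_single_arg_ctor<FromInt, void*>::value,
                  "no constructor takes void*");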
diff --git a/src/mongo/base/concept/convertible_to.h b/src/mongo/base/concept/convertible_to.h
index 7cf7e86a73f..9f9187126d5 100644
--- a/src/mongo/base/concept/convertible_to.h
+++ b/src/mongo/base/concept/convertible_to.h
@@ -30,13 +30,13 @@
namespace mongo {
namespace concept {
-/**
- * The ConvertibleTo concept models a type which can be converted implicitly into a `T`.
- * The code: `T x; x= ConvertibleTo< T >{};` should be valid.
- */
-template <typename T>
-struct ConvertibleTo {
- operator T();
-}
+ /**
+ * The ConvertibleTo concept models a type which can be converted implicitly into a `T`.
+ * The code: `T x; x= ConvertibleTo< T >{};` should be valid.
+ */
+ template <typename T>
+ struct ConvertibleTo {
+ operator T();
+ }
} // namespace concept
} // namespace mongo
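The same concept in miniature, with a hypothetical `Answer` type:

    struct Answer {
        operator int() { return 42; }  // models ConvertibleTo<int>
    };

    int main() {
        int x = 0;
        x = Answer{};  // implicit conversion; x == 42
        return x == 42 ? 0 : 1;
    }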
diff --git a/src/mongo/base/concept/copy_assignable.h b/src/mongo/base/concept/copy_assignable.h
index e89d4699e87..580325564e0 100644
--- a/src/mongo/base/concept/copy_assignable.h
+++ b/src/mongo/base/concept/copy_assignable.h
@@ -30,17 +30,17 @@
namespace mongo {
namespace concept {
-/**
- * The CopyAssignable concept models a type which can be copy assigned.
- *
- * The expression: `copyAssignable= copyAssignable` should be valid.
- */
-struct CopyAssignable {
/**
- * The copy assignment operator is required by `CopyAssignable`.
- * NOTE: Copy Assignment is only required on lvalue targets of `CopyAssignable`.
+ * The CopyAssignable concept models a type which can be copy assigned.
+ *
+ * The expression: `copyAssignable= copyAssignable` should be valid.
*/
- CopyAssignable& operator=(const CopyAssignable&) &;
-};
+ struct CopyAssignable {
+ /**
+ * The copy assignment operator is required by `CopyAssignable`.
+ * NOTE: Copy Assignment is only required on lvalue targets of `CopyAssignable`.
+ */
+ CopyAssignable& operator=(const CopyAssignable&) &;
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/copy_constructible.h b/src/mongo/base/concept/copy_constructible.h
index 68d8cab494a..689f8e44b71 100644
--- a/src/mongo/base/concept/copy_constructible.h
+++ b/src/mongo/base/concept/copy_constructible.h
@@ -30,13 +30,13 @@
namespace mongo {
namespace concept {
-/**
- * The CopyConstructable concept models a type which can be copy constructed.
- *
- * The expression: `CopyConstructible{ copyConstructible }` should be valid.
- */
-struct CopyConstructible {
- CopyConstructible(const CopyConstructible&);
-};
+ /**
+ * The CopyConstructable concept models a type which can be copy constructed.
+ *
+ * The expression: `CopyConstructible{ copyConstructible }` should be valid.
+ */
+ struct CopyConstructible {
+ CopyConstructible(const CopyConstructible&);
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/unique_ptr.h b/src/mongo/base/concept/unique_ptr.h
index e014a6d8a14..b7518963c54 100644
--- a/src/mongo/base/concept/unique_ptr.h
+++ b/src/mongo/base/concept/unique_ptr.h
@@ -32,38 +32,38 @@
namespace mongo {
namespace concept {
-/**
- * The `UniquePtr` Concept models a movable owning pointer of an object.
- * `std::unique_ptr< T >` is a model of `mongo::concept::UniquePtr< T >`.
- */
-template <typename T>
-struct UniquePtr {
- /** The `UniquePtr< T >` must retire its pointer to `T` on destruction. */
- ~UniquePtr() noexcept;
+ /**
+ * The `UniquePtr` Concept models a movable owning pointer of an object.
+ * `std::unique_ptr< T >` is a model of `mongo::concept::UniquePtr< T >`.
+ */
+ template <typename T>
+ struct UniquePtr {
+ /** The `UniquePtr< T >` must retire its pointer to `T` on destruction. */
+ ~UniquePtr() noexcept;
- UniquePtr(UniquePtr&& p);
- UniquePtr& operator=(UniquePtr&& p);
+ UniquePtr(UniquePtr&& p);
+ UniquePtr& operator=(UniquePtr&& p);
- UniquePtr();
- UniquePtr(T* p);
+ UniquePtr();
+ UniquePtr(T* p);
- ConvertibleTo<T*> operator->() const;
- T& operator*() const;
+ ConvertibleTo<T*> operator->() const;
+ T& operator*() const;
- explicit operator bool() const;
+ explicit operator bool() const;
- ConvertibleTo<T*> get() const;
+ ConvertibleTo<T*> get() const;
- void reset() noexcept;
- void reset(ConvertibleTo<T*>);
-};
+ void reset() noexcept;
+ void reset(ConvertibleTo<T*>);
+ };
-/*! A `UniquePtr` object must be equality comparable. */
-template <typename T>
-bool operator==(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
+ /*! A `UniquePtr` object must be equality comparable. */
+ template <typename T>
+ bool operator==(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
-/*! A `UniquePtr` object must be inequality comparable. */
-template <typename T>
-bool operator!=(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
+ /*! A `UniquePtr` object must be inequality comparable. */
+ template <typename T>
+ bool operator!=(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
} // namespace concept
} // namespace mongo
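As the comment notes, std::unique_ptr is the canonical model; a short check of the operations the concept lists:

    #include <memory>

    int main() {
        std::unique_ptr<int> a(new int(7));
        std::unique_ptr<int> b = std::move(a);  // move construction transfers ownership
        bool released = (a == nullptr);         // equality comparison
        *b = 8;                                 // operator*
        b.reset();                              // retire the pointee
        return released ? 0 : 1;
    }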
diff --git a/src/mongo/base/data_type_validated_test.cpp b/src/mongo/base/data_type_validated_test.cpp
index e6e63f4592a..392ef98989b 100644
--- a/src/mongo/base/data_type_validated_test.cpp
+++ b/src/mongo/base/data_type_validated_test.cpp
@@ -61,8 +61,8 @@ struct Validator<char> {
namespace {
using namespace mongo;
-using std::end;
using std::begin;
+using std::end;
TEST(DataTypeValidated, SuccessfulValidation) {
char buf[1];
diff --git a/src/mongo/base/encoded_value_storage_test.cpp b/src/mongo/base/encoded_value_storage_test.cpp
index e9a70a819e3..2a6ed09b5e2 100644
--- a/src/mongo/base/encoded_value_storage_test.cpp
+++ b/src/mongo/base/encoded_value_storage_test.cpp
@@ -117,7 +117,7 @@ public:
Value(ZeroInitTag_t zit) : EncodedValueStorage<Layout, ConstView, View>(zit) {}
};
-}
+} // namespace EncodedValueStorageTest
TEST(EncodedValueStorage, EncodedValueStorage) {
EncodedValueStorageTest::Value raw;
diff --git a/src/mongo/base/global_initializer_registerer.h b/src/mongo/base/global_initializer_registerer.h
index 14345a3f98a..08b0ba625bc 100644
--- a/src/mongo/base/global_initializer_registerer.h
+++ b/src/mongo/base/global_initializer_registerer.h
@@ -51,42 +51,42 @@ extern const std::string& defaultInitializerName();
class GlobalInitializerRegisterer {
public:
/**
- * Constructor parameters:
- *
- * - std::string name
- *
- * - InitializerFunction initFn
- * Must be nonnull.
- * Example expression:
- *
- * [](InitializerContext* context) {
- * // initialization code
- * return Status::OK();
- * }
- *
- * - DeinitializerFunction deinitFn
- * A deinitialization that will execute in reverse order from initialization and
- * support re-initialization. If not specified, defaults to the `nullptr` function.
- * Example expression:
- *
- * [](DeinitializerContext* context) {
- * // deinitialization code
- * return Status::OK();
- * }
- *
- * - std::vector<std::string> prerequisites
- * If not specified, defaults to {"default"}.
- *
- * - std::vector<std::string> dependents
- * If not specified, defaults to {} (no dependents).
- *
- *
- * At run time, the full set of prerequisites for `name` will be computed as the union of the
- * `prerequisites` (which can be defaulted) and all other mongo initializers that list `name` in
- * their `dependents`.
- *
- * A non-null `deinitFn` will tag the initializer as supporting re-initialization.
- */
+ * Constructor parameters:
+ *
+ * - std::string name
+ *
+ * - InitializerFunction initFn
+ * Must be nonnull.
+ * Example expression:
+ *
+ * [](InitializerContext* context) {
+ * // initialization code
+ * return Status::OK();
+ * }
+ *
+ * - DeinitializerFunction deinitFn
+ * A deinitialization that will execute in reverse order from initialization and
+ * support re-initialization. If not specified, defaults to the `nullptr` function.
+ * Example expression:
+ *
+ * [](DeinitializerContext* context) {
+ * // deinitialization code
+ * return Status::OK();
+ * }
+ *
+ * - std::vector<std::string> prerequisites
+ * If not specified, defaults to {"default"}.
+ *
+ * - std::vector<std::string> dependents
+ * If not specified, defaults to {} (no dependents).
+ *
+ *
+ * At run time, the full set of prerequisites for `name` will be computed as the union of the
+ * `prerequisites` (which can be defaulted) and all other mongo initializers that list `name` in
+ * their `dependents`.
+ *
+ * A non-null `deinitFn` will tag the initializer as supporting re-initialization.
+ */
GlobalInitializerRegisterer(std::string name,
InitializerFunction initFn,
DeinitializerFunction deinitFn = nullptr,
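Pulling the documented parameters into one usage sketch (the name "MySubsystem" and both lambda bodies are hypothetical; nothing beyond the constructor described above is assumed):

    mongo::GlobalInitializerRegisterer myRegisterer(
        "MySubsystem",
        [](mongo::InitializerContext* context) {
            // initialization code
            return mongo::Status::OK();
        },
        [](mongo::DeinitializerContext* context) {
            // deinitialization code; a non-null deinitFn marks the
            // initializer as supporting re-initialization
            return mongo::Status::OK();
        },
        {"default"},  // prerequisites
        {});          // dependents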
diff --git a/src/mongo/base/initializer.h b/src/mongo/base/initializer.h
index eff1500387c..c7297abacbf 100644
--- a/src/mongo/base/initializer.h
+++ b/src/mongo/base/initializer.h
@@ -97,14 +97,14 @@ Status runGlobalInitializers(int argc, const char* const* argv, const char* cons
void runGlobalInitializersOrDie(int argc, const char* const* argv, const char* const* envp);
/**
-* Run the global deinitializers. They will execute in reverse order from initialization.
-*
-* It's a programming error for this to fail, but if it does it will return a status other
-* than Status::OK.
-*
-* This means that the few initializers that might want to terminate the program by failing
-* should probably arrange to terminate the process themselves.
-*/
+ * Run the global deinitializers. They will execute in reverse order from initialization.
+ *
+ * It's a programming error for this to fail, but if it does it will return a status other
+ * than Status::OK.
+ *
+ * This means that the few initializers that might want to terminate the program by failing
+ * should probably arrange to terminate the process themselves.
+ */
Status runGlobalDeinitializers();
} // namespace mongo
diff --git a/src/mongo/base/initializer_function.h b/src/mongo/base/initializer_function.h
index 2050013c997..05025010a87 100644
--- a/src/mongo/base/initializer_function.h
+++ b/src/mongo/base/initializer_function.h
@@ -47,11 +47,11 @@ class DeinitializerContext;
typedef std::function<Status(InitializerContext*)> InitializerFunction;
/**
-* A DeinitializerFunction implements the behavior of a deinitializer operation.
-*
-* On successful execution, a DeinitializerFunction returns Status::OK(). It may
-* inspect and mutate the supplied DeinitializerContext.
-*/
+ * A DeinitializerFunction implements the behavior of a deinitializer operation.
+ *
+ * On successful execution, a DeinitializerFunction returns Status::OK(). It may
+ * inspect and mutate the supplied DeinitializerContext.
+ */
typedef std::function<Status(DeinitializerContext*)> DeinitializerFunction;
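So a deinitializer is just a function object of this shape, e.g. (hypothetical body, assuming the surrounding mongo headers):

    mongo::DeinitializerFunction shutdownHook =
        [](mongo::DeinitializerContext* context) {
            // release whatever the matching initializer acquired
            return mongo::Status::OK();
        };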
diff --git a/src/mongo/base/parse_number_test.cpp b/src/mongo/base/parse_number_test.cpp
index d4f42dbbb72..577f2c3b38d 100644
--- a/src/mongo/base/parse_number_test.cpp
+++ b/src/mongo/base/parse_number_test.cpp
@@ -137,7 +137,10 @@ PARSE_TEST(TestRejectingBadBases) {
std::vector<Spec> specs = {{-1, "0"}, {1, "10"}, {37, "-10"}, {-1, " "}, {37, "f"}, {-1, "^%"}};
if (typeid(NumberType) == typeid(double)) {
std::vector<Spec> doubleSpecs = {
- {8, "0"}, {10, "0"}, {16, "0"}, {36, "0"},
+ {8, "0"},
+ {10, "0"},
+ {16, "0"},
+ {36, "0"},
};
std::copy(doubleSpecs.begin(), doubleSpecs.end(), std::back_inserter(specs));
}
@@ -156,7 +159,7 @@ PARSE_TEST(TestParsingNonNegatives) {
StringData spec;
int expectedValue;
} specs[] = {{"10", 10}, {"0", 0}, {"1", 1}, {"0xff", 0xff}, {"077", 077}};
- for (const auto[str, expected] : specs) {
+ for (const auto [str, expected] : specs) {
ASSERT_PARSES(NumberType, str, expected);
}
}
@@ -271,7 +274,7 @@ PARSE_TEST(TestSkipLeadingWhitespace) {
{"-077", true}};
NumberParser defaultParser;
NumberParser skipWs = NumberParser().skipWhitespace();
- for (const auto[numStr, is_negative] : specs) {
+ for (const auto [numStr, is_negative] : specs) {
NumberType expected;
bool shouldParse = !is_negative || (is_negative && std::is_signed_v<NumberType>);
@@ -322,7 +325,7 @@ PARSE_TEST(TestEndOfNum) {
"g", // since the largest inferred base is 16, next non-number character will be g
""};
NumberParser defaultParser;
- for (const auto[numStr, is_negative] : specs) {
+ for (const auto [numStr, is_negative] : specs) {
NumberType expected;
bool shouldParse = !is_negative || (is_negative && std::is_signed_v<NumberType>);
Status parsed = defaultParser(numStr, &expected);
@@ -384,7 +387,7 @@ PARSE_TEST(TestSkipLeadingWsAndEndptr) {
{"-077", true}};
StringData whitespaces[] = {" ", "", "\t \t", "\r\n\n\t", "\f\v "};
NumberParser defaultParser;
- for (const auto[numStr, is_negative] : specs) {
+ for (const auto [numStr, is_negative] : specs) {
NumberType expected;
bool shouldParse = !is_negative || (is_negative && std::is_signed_v<NumberType>);
Status parsed = defaultParser(numStr, &expected);
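The whitespace change repeated through this file is clang-format's newer style for C++17 structured bindings (`auto [a, b]`, with a space after `auto`); the construct itself, as a standalone sketch:

    #include <string>

    struct Spec {
        std::string str;
        int expected;
    };

    int main() {
        Spec specs[] = {{"10", 10}, {"0xff", 255}};
        int sum = 0;
        for (const auto [str, expected] : specs)  // binds Spec's two members
            sum += expected;
        return sum == 265 ? 0 : 1;
    }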
diff --git a/src/mongo/base/unwind_test.cpp b/src/mongo/base/unwind_test.cpp
index 6afea43e89b..7b2e5129d5f 100644
--- a/src/mongo/base/unwind_test.cpp
+++ b/src/mongo/base/unwind_test.cpp
@@ -135,10 +135,10 @@ void assertTraceContains(const std::string (&names)[size], const std::string sta
auto pos = remainder.find(name);
if (pos == remainder.npos) {
- unittest::log().setIsTruncatable(false) << std::endl
- << "--- BEGIN SAMPLE BACKTRACE ---" << std::endl
- << std::string(stacktrace)
- << "--- END SAMPLE BACKTRACE ---";
+ unittest::log().setIsTruncatable(false)
+ << std::endl
+ << "--- BEGIN SAMPLE BACKTRACE ---" << std::endl
+ << std::string(stacktrace) << "--- END SAMPLE BACKTRACE ---";
FAIL("name '{}' is missing or out of order in sample backtrace"_format(
std::string(name)));
}
@@ -149,7 +149,12 @@ void assertTraceContains(const std::string (&names)[size], const std::string sta
TEST(Unwind, Demangled) {
// Trickery with std::vector<std::function> is to hide from the optimizer.
Context ctx{{
- callNext<0>, callNext<1>, callNext<2>, callNext<3>, callNext<4>, callNext<5>,
+ callNext<0>,
+ callNext<1>,
+ callNext<2>,
+ callNext<3>,
+ callNext<4>,
+ callNext<5>,
}};
ctx.plan.back()(ctx);
// Check that these function names appear in the trace, in order.
diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp
index e966af9d559..1b7715b5946 100644
--- a/src/mongo/bson/bson_obj_test.cpp
+++ b/src/mongo/bson/bson_obj_test.cpp
@@ -637,10 +637,7 @@ TEST(BSONObj, getFields) {
TEST(BSONObj, getFieldsWithDuplicates) {
auto e = BSON("a" << 2 << "b"
<< "3"
- << "a"
- << 9
- << "b"
- << 10);
+ << "a" << 9 << "b" << 10);
std::array<StringData, 2> fieldNames{"a", "b"};
std::array<BSONElement, 2> fields;
e.getFields(fieldNames, &fields);
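For readers unfamiliar with the macro being reflowed here: BSON() alternates field names and values on an implicit builder, so the hunk above builds the same object regardless of line breaks. A sketch using the hunk's own values:

    BSONObj doc = BSON("a" << 2 << "b"
                           << "3"
                           << "a" << 9 << "b" << 10);
    // doc == { "a" : 2, "b" : "3", "a" : 9, "b" : 10 }; duplicate names are preserved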
diff --git a/src/mongo/bson/bson_validate_test.cpp b/src/mongo/bson/bson_validate_test.cpp
index 94fb4ec269b..479977dd930 100644
--- a/src/mongo/bson/bson_validate_test.cpp
+++ b/src/mongo/bson/bson_validate_test.cpp
@@ -41,8 +41,8 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::endl;
+using std::unique_ptr;
void appendInvalidStringElement(const char* fieldName, BufBuilder* bb) {
// like a BSONObj string, but without a NUL terminator.
@@ -150,23 +150,16 @@ TEST(BSONValidate, Fuzz) {
log() << "BSONValidate Fuzz random seed: " << seed << endl;
PseudoRandom randomSource(seed);
- BSONObj original = BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
- << BSON("five" << BSON("six" << 11))
- << "seven"
- << BSON_ARRAY("a"
- << "bb"
- << "ccc"
- << 5)
- << "eight"
- << BSONDBRef("rrr", OID("01234567890123456789aaaa"))
- << "_id"
- << OID("deadbeefdeadbeefdeadbeef")
- << "nine"
- << BSONBinData("\x69\xb7", 2, BinDataGeneral)
- << "ten"
- << Date_t::fromMillisSinceEpoch(44)
- << "eleven"
- << BSONRegEx("foooooo", "i"));
+ BSONObj original =
+ BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
+ << BSON("five" << BSON("six" << 11)) << "seven"
+ << BSON_ARRAY("a"
+ << "bb"
+ << "ccc" << 5)
+ << "eight" << BSONDBRef("rrr", OID("01234567890123456789aaaa")) << "_id"
+ << OID("deadbeefdeadbeefdeadbeef") << "nine"
+ << BSONBinData("\x69\xb7", 2, BinDataGeneral) << "ten"
+ << Date_t::fromMillisSinceEpoch(44) << "eleven" << BSONRegEx("foooooo", "i"));
int32_t fuzzFrequencies[] = {2, 10, 20, 100, 1000};
for (size_t i = 0; i < sizeof(fuzzFrequencies) / sizeof(int32_t); ++i) {
@@ -245,8 +238,9 @@ TEST(BSONValidateFast, Simple3) {
}
TEST(BSONValidateFast, NestedObject) {
- BSONObj x = BSON("a" << 1 << "b" << BSON("c" << 2 << "d" << BSONArrayBuilder().obj() << "e"
- << BSON_ARRAY("1" << 2 << 3)));
+ BSONObj x = BSON("a" << 1 << "b"
+ << BSON("c" << 2 << "d" << BSONArrayBuilder().obj() << "e"
+ << BSON_ARRAY("1" << 2 << 3)));
ASSERT_OK(validateBSON(x.objdata(), x.objsize(), BSONVersion::kLatest));
ASSERT_NOT_OK(validateBSON(x.objdata(), x.objsize() / 2, BSONVersion::kLatest));
}
@@ -323,13 +317,10 @@ TEST(BSONValidateFast, StringHasSomething) {
bb.appendStr("x", /*withNUL*/ true);
bb.appendNum(0);
const BSONObj x = ob.done();
- ASSERT_EQUALS(5 // overhead
- +
- 1 // type
- +
- 2 // name
- +
- 4 // size
+ ASSERT_EQUALS(5 // overhead
+ + 1 // type
+ + 2 // name
+ + 4 // size
,
x.objsize());
ASSERT_NOT_OK(validateBSON(x.objdata(), x.objsize(), BSONVersion::kLatest));
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index 4820437c359..7562b7ca408 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -599,8 +599,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
// NaN doubles are rejected.
if (std::isnan(eDouble)) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected an integer, but found NaN in: "
- << toString(true, true));
+ str::stream()
+ << "Expected an integer, but found NaN in: " << toString(true, true));
}
// No integral doubles that are too large to be represented as a 64 bit signed integer.
@@ -609,8 +609,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
if (eDouble >= kLongLongMaxPlusOneAsDouble ||
eDouble < std::numeric_limits<long long>::min()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Cannot represent as a 64-bit integer: "
- << toString(true, true));
+ str::stream()
+ << "Cannot represent as a 64-bit integer: " << toString(true, true));
}
// This checks if elem is an integral double.
@@ -625,8 +625,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
number = numberDecimal().toLongExact(&signalingFlags);
if (signalingFlags != Decimal128::kNoFlag) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Cannot represent as a 64-bit integer: "
- << toString(true, true));
+ str::stream()
+ << "Cannot represent as a 64-bit integer: " << toString(true, true));
}
} else {
number = numberLong();
@@ -697,7 +697,7 @@ BSONElement BSONElement::operator[](StringData field) const {
}
namespace {
-NOINLINE_DECL void msgAssertedBadType[[noreturn]](int8_t type) {
+NOINLINE_DECL void msgAssertedBadType [[noreturn]] (int8_t type) {
msgasserted(10320, str::stream() << "BSONElement: bad type " << (int)type);
}
} // namespace
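A note on the overflow guard in the first hunk: LLONG_MAX itself is not representable as a double, and under round-to-nearest it rounds up to exactly 2^63, which is why the bound is named kLongLongMaxPlusOneAsDouble and compared with >=. A compile-time check of that rounding, assuming IEEE 754 doubles:

    static_assert(static_cast<double>(9223372036854775807LL) == 9223372036854775808.0,
                  "2^63 - 1 rounds up to 2^63 as a double");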
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index c5a08e8a9cc..151872be545 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -117,8 +117,7 @@ public:
double Number() const {
uassert(13118,
str::stream() << "expected " << fieldName()
- << " to have a numeric type, but it is a "
- << type(),
+ << " to have a numeric type, but it is a " << type(),
isNumber());
return number();
}
@@ -951,4 +950,4 @@ inline BSONElement::BSONElement() {
fieldNameSize_ = 0;
totalSize = 1;
}
-}
+} // namespace mongo
diff --git a/src/mongo/bson/bsonelement_test.cpp b/src/mongo/bson/bsonelement_test.cpp
index 5c036ebeb23..f98ccf93894 100644
--- a/src/mongo/bson/bsonelement_test.cpp
+++ b/src/mongo/bson/bsonelement_test.cpp
@@ -128,15 +128,13 @@ TEST(BSONElement, ExtractLargeSubObject) {
}
TEST(BSONElement, SafeNumberLongPositiveBound) {
- BSONObj obj = BSON("kLongLongMaxPlusOneAsDouble"
- << BSONElement::kLongLongMaxPlusOneAsDouble
- << "towardsZero"
- << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble, 0.0)
- << "towardsInfinity"
- << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble,
- std::numeric_limits<double>::max())
- << "positiveInfinity"
- << std::numeric_limits<double>::infinity());
+ BSONObj obj =
+ BSON("kLongLongMaxPlusOneAsDouble"
+ << BSONElement::kLongLongMaxPlusOneAsDouble << "towardsZero"
+ << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble, 0.0) << "towardsInfinity"
+ << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble,
+ std::numeric_limits<double>::max())
+ << "positiveInfinity" << std::numeric_limits<double>::infinity());
// kLongLongMaxPlusOneAsDouble is the least double value that will overflow a 64-bit signed
// two's-complement integer. Historically, converting this value with safeNumberLong() would
@@ -182,13 +180,10 @@ TEST(BSONElement, SafeNumberLongNegativeBound) {
static_cast<double>(std::numeric_limits<long long>::lowest());
BSONObj obj =
BSON("lowestLongLongAsDouble" // This comment forces clang-format to break here.
- << lowestLongLongAsDouble
- << "towardsZero"
- << std::nextafter(lowestLongLongAsDouble, 0.0)
- << "towardsNegativeInfinity"
+ << lowestLongLongAsDouble << "towardsZero"
+ << std::nextafter(lowestLongLongAsDouble, 0.0) << "towardsNegativeInfinity"
<< std::nextafter(lowestLongLongAsDouble, std::numeric_limits<double>::lowest())
- << "negativeInfinity"
- << -std::numeric_limits<double>::infinity());
+ << "negativeInfinity" << -std::numeric_limits<double>::infinity());
ASSERT_EQ(obj["lowestLongLongAsDouble"].safeNumberLongForHash(),
std::numeric_limits<long long>::lowest());
diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h
index cfc56a63ef2..f2bf2467292 100644
--- a/src/mongo/bson/bsonmisc.h
+++ b/src/mongo/bson/bsonmisc.h
@@ -281,4 +281,4 @@ private:
// considers order
bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
-}
+} // namespace mongo
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index cecc98f299c..07aab647110 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -391,8 +391,8 @@ Status BSONObj::storageValidEmbedded() const {
if (name.startsWith("$")) {
if (first &&
// $ref is a collection name and must be a String
- (name == "$ref") &&
- e.type() == String && (i.next().fieldNameStringData() == "$id")) {
+ (name == "$ref") && e.type() == String &&
+ (i.next().fieldNameStringData() == "$id")) {
first = false;
// keep inspecting fields for optional "$db"
e = i.next();
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index 9cefccb7d78..d05108706ee 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -131,7 +131,7 @@ public:
/** Construct a BSONObj from data in the proper format.
* Use this constructor when something else owns bsonData's buffer
- */
+ */
template <typename Traits = DefaultSizeTrait>
explicit BSONObj(const char* bsonData, Traits t = Traits{}) {
init<Traits>(bsonData);
@@ -142,8 +142,8 @@ public:
_ownedBuffer(std::move(ownedBuffer)) {}
/** Move construct a BSONObj */
- BSONObj(BSONObj&& other) noexcept : _objdata(std::move(other._objdata)),
- _ownedBuffer(std::move(other._ownedBuffer)) {
+ BSONObj(BSONObj&& other) noexcept
+ : _objdata(std::move(other._objdata)), _ownedBuffer(std::move(other._ownedBuffer)) {
other._objdata = BSONObj()._objdata; // To return to an empty state.
dassert(!other.isOwned());
}
@@ -367,7 +367,7 @@ public:
* this.extractFieldsUnDotted({a : 1 , c : 1}) -> {"" : 4 , "" : 6 }
* this.extractFieldsUnDotted({b : "blah"}) -> {"" : 5}
*
- */
+ */
BSONObj extractFieldsUnDotted(const BSONObj& pattern) const;
BSONObj filterFieldsUndotted(const BSONObj& filter, bool inFilter) const;
@@ -696,7 +696,7 @@ private:
class BSONObjIterator {
public:
/** Create an iterator for a BSON object.
- */
+ */
explicit BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
if (MONGO_unlikely(sz == 0)) {
@@ -789,7 +789,7 @@ protected:
private:
const int _nfields;
- const std::unique_ptr<const char* []> _fields;
+ const std::unique_ptr<const char*[]> _fields;
int _cur;
};
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index 08720e7e18e..1eb052a7c21 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -614,7 +614,7 @@ public:
* destructive
* The returned BSONObj will free the buffer when it is finished.
* @return owned BSONObj
- */
+ */
template <typename BSONTraits = BSONObj::DefaultSizeTrait>
BSONObj obj() {
massert(10335, "builder does not own memory", owned());
diff --git a/src/mongo/bson/bsonobjbuilder_test.cpp b/src/mongo/bson/bsonobjbuilder_test.cpp
index c5854c38681..a807a19d35d 100644
--- a/src/mongo/bson/bsonobjbuilder_test.cpp
+++ b/src/mongo/bson/bsonobjbuilder_test.cpp
@@ -286,8 +286,7 @@ TEST(BSONObjBuilderTest, ResumeBuildingWithNesting) {
ASSERT_BSONOBJ_EQ(obj,
BSON("ll" << BSON("f" << BSON("cc"
<< "dd"))
- << "a"
- << BSON("c" << 3)));
+ << "a" << BSON("c" << 3)));
}
TEST(BSONObjBuilderTest, ResetToEmptyResultsInEmptyObj) {
@@ -483,12 +482,10 @@ TEST(BSONObjBuilderTest, SizeChecks) {
BSONObjBuilder builder;
ASSERT_THROWS(
[&]() {
-
for (StringData character : {"a", "b", "c"}) {
builder.append(character, obj);
}
BSONObj finalObj = builder.obj<BSONObj::LargeSizeTrait>();
-
}(),
DBException);
}
diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp
index 5d4acb31b01..b71cd7d3d4e 100644
--- a/src/mongo/bson/json.cpp
+++ b/src/mongo/bson/json.cpp
@@ -47,9 +47,9 @@
namespace mongo {
-using std::unique_ptr;
using std::ostringstream;
using std::string;
+using std::unique_ptr;
using namespace fmt::literals;
#if 0
@@ -1259,7 +1259,7 @@ StatusWith<Date_t> JParse::parseDate() {
Status parsedStatus = NumberParser::strToAny(10)(_input, &msSinceEpoch, &endptr);
if (parsedStatus == ErrorCodes::Overflow) {
/* Need to handle this because jsonString outputs the value of Date_t as unsigned.
- * See SERVER-8330 and SERVER-8573 */
+ * See SERVER-8330 and SERVER-8573 */
unsigned long long oldDate; // Date_t used to be stored as unsigned long longs
parsedStatus = NumberParser::strToAny(10)(_input, &oldDate, &endptr);
if (parsedStatus == ErrorCodes::Overflow) {
diff --git a/src/mongo/bson/oid_test.cpp b/src/mongo/bson/oid_test.cpp
index 54053860396..a33db2d90b1 100644
--- a/src/mongo/bson/oid_test.cpp
+++ b/src/mongo/bson/oid_test.cpp
@@ -166,4 +166,4 @@ TEST(Basic, FromTerm) {
ASSERT_OK(mongo::NumberParser::strToAny()(oidTail, &oidTailInt));
ASSERT_EQUALS(term, oidTailInt);
}
-}
+} // namespace
diff --git a/src/mongo/bson/ordering.h b/src/mongo/bson/ordering.h
index fe8bf48533f..793e14820f5 100644
--- a/src/mongo/bson/ordering.h
+++ b/src/mongo/bson/ordering.h
@@ -88,4 +88,4 @@ public:
return Ordering(b);
}
};
-}
+} // namespace mongo
diff --git a/src/mongo/bson/timestamp.cpp b/src/mongo/bson/timestamp.cpp
index 3f967766206..14cc982ff49 100644
--- a/src/mongo/bson/timestamp.cpp
+++ b/src/mongo/bson/timestamp.cpp
@@ -74,4 +74,4 @@ BSONObj Timestamp::toBSON() const {
bldr.append("", *this);
return bldr.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/bson/ugly_bson_integration_test.cpp b/src/mongo/bson/ugly_bson_integration_test.cpp
index 77a96a14400..5b9d7a2c28c 100644
--- a/src/mongo/bson/ugly_bson_integration_test.cpp
+++ b/src/mongo/bson/ugly_bson_integration_test.cpp
@@ -57,10 +57,7 @@ TEST_F(UglyBSONFixture, DuplicateFields) {
assertCommandFailsOnServer("admin",
BSON("insert"
<< "test"
- << "documents"
- << BSONArray()
- << "documents"
- << BSONArray()),
+ << "documents" << BSONArray() << "documents" << BSONArray()),
ErrorCodes::duplicateCodeForTest(40413));
}
diff --git a/src/mongo/bson/util/bson_check.h b/src/mongo/bson/util/bson_check.h
index 1ca748d88a2..b969ab7a2a6 100644
--- a/src/mongo/bson/util/bson_check.h
+++ b/src/mongo/bson/util/bson_check.h
@@ -56,8 +56,8 @@ Status bsonCheckOnlyHasFieldsImpl(StringData objectName,
if (!allowed(name)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Unexpected field " << e.fieldName() << " in "
- << objectName);
+ str::stream()
+ << "Unexpected field " << e.fieldName() << " in " << objectName);
}
bool& seenBefore = seenFields[name];
@@ -65,8 +65,8 @@ Status bsonCheckOnlyHasFieldsImpl(StringData objectName,
seenBefore = true;
} else {
return Status(ErrorCodes::Error(51000),
- str::stream() << "Field " << name << " appears multiple times in "
- << objectName);
+ str::stream()
+ << "Field " << name << " appears multiple times in " << objectName);
}
}
return Status::OK();
@@ -105,10 +105,7 @@ Status bsonCheckOnlyHasFieldsForCommand(StringData objectName,
inline void checkBSONType(BSONType expectedType, const BSONElement& elem) {
uassert(elem.type() == BSONType::EOO ? ErrorCodes::NoSuchKey : ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for '" << elem.fieldNameStringData() << "'. Expected a "
- << typeName(expectedType)
- << ", got a "
- << typeName(elem.type())
- << '.',
+ << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
elem.type() == expectedType);
}
diff --git a/src/mongo/bson/util/bson_check_test.cpp b/src/mongo/bson/util/bson_check_test.cpp
index f220ee1e0ca..93716c84a6b 100644
--- a/src/mongo/bson/util/bson_check_test.cpp
+++ b/src/mongo/bson/util/bson_check_test.cpp
@@ -52,26 +52,19 @@ TEST(BsonCheck, CheckHasOnlyLegalFields) {
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField"
- << 1
- << "anotherField"
- << 2),
+ << "thirdField" << 1 << "anotherField" << 2),
legals));
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField"
- << 1),
+ << "thirdField" << 1),
legals));
ASSERT_EQUALS(ErrorCodes::BadValue,
bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "illegal"
- << 4
- << "thirdField"
- << 1),
+ << "illegal" << 4 << "thirdField" << 1),
legals));
}
diff --git a/src/mongo/bson/util/bson_extract.cpp b/src/mongo/bson/util/bson_extract.cpp
index acd52ce7d82..4335d1c3699 100644
--- a/src/mongo/bson/util/bson_extract.cpp
+++ b/src/mongo/bson/util/bson_extract.cpp
@@ -65,10 +65,9 @@ Status bsonExtractTypedFieldImpl(const BSONObj& object,
return status;
if (type != outElement->type()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << fieldName << "\" had the wrong type. Expected "
- << typeName(type)
- << ", found "
- << typeName(outElement->type()));
+ str::stream()
+ << "\"" << fieldName << "\" had the wrong type. Expected "
+ << typeName(type) << ", found " << typeName(outElement->type()));
}
return status;
}
@@ -83,9 +82,9 @@ Status bsonExtractIntegerFieldImpl(const BSONObj& object,
return status;
if (!element.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected field \"" << fieldName
- << "\" to have numeric type, but found "
- << typeName(element.type()));
+ str::stream()
+ << "Expected field \"" << fieldName
+ << "\" to have numeric type, but found " << typeName(element.type()));
}
long long result = element.safeNumberLong();
if (result != element.numberDouble()) {
@@ -109,9 +108,9 @@ Status bsonExtractDoubleFieldImpl(const BSONObj& object,
return status;
if (!element.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected field \"" << fieldName
- << "\" to have numeric type, but found "
- << typeName(element.type()));
+ str::stream()
+ << "Expected field \"" << fieldName
+ << "\" to have numeric type, but found " << typeName(element.type()));
}
*out = element.numberDouble();
return status;
@@ -155,8 +154,7 @@ Status bsonExtractBooleanFieldWithDefault(const BSONObj& object,
if (!element.isNumber() && !element.isBoolean()) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected boolean or number type for field \"" << fieldName
- << "\", found "
- << typeName(element.type()));
+ << "\", found " << typeName(element.type()));
}
*out = element.trueValue();
return status;
@@ -261,8 +259,7 @@ Status bsonExtractIntegerFieldWithDefaultIf(const BSONObj& object,
if (!pred(*out)) {
return Status(ErrorCodes::BadValue,
str::stream() << "Invalid value in field \"" << fieldName << "\": " << *out
- << ": "
- << predDescription);
+ << ": " << predDescription);
}
return status;
}
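The pattern being reflowed throughout this file, and through most of this commit: str::stream() is an ostream-like accumulator convertible to std::string, typically built inline inside a Status. The shape of the idiom, as a sketch using only names visible in the hunks above (`makeTypeMismatch` is hypothetical):

    Status makeTypeMismatch(StringData fieldName, const BSONElement& element) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected field \"" << fieldName
                                    << "\" to have numeric type, but found "
                                    << typeName(element.type()));
    }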
diff --git a/src/mongo/bson/util/bson_extract_test.cpp b/src/mongo/bson/util/bson_extract_test.cpp
index dbb76f1a9c7..a06aa202900 100644
--- a/src/mongo/bson/util/bson_extract_test.cpp
+++ b/src/mongo/bson/util/bson_extract_test.cpp
@@ -90,12 +90,10 @@ TEST(ExtractBSON, ExtractStringFieldWithDefault) {
TEST(ExtractBSON, ExtractBooleanFieldWithDefault) {
BSONObj obj1 = BSON("a" << 1 << "b"
<< "hello"
- << "c"
- << true);
+ << "c" << true);
BSONObj obj2 = BSON("a" << 0 << "b"
<< "hello"
- << "c"
- << false);
+ << "c" << false);
bool b;
b = false;
ASSERT_OK(bsonExtractBooleanFieldWithDefault(obj1, "a", false, &b));
diff --git a/src/mongo/bson/util/builder_test.cpp b/src/mongo/bson/util/builder_test.cpp
index 8ceb8c2c5b8..a1c1f127eb0 100644
--- a/src/mongo/bson/util/builder_test.cpp
+++ b/src/mongo/bson/util/builder_test.cpp
@@ -124,4 +124,4 @@ TEST(Builder, AppendUnsignedLongLong) {
TEST(Builder, AppendShort) {
testStringBuilderIntegral<short>();
}
-}
+} // namespace mongo
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 52055262b78..f035312e4f7 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -119,8 +119,7 @@ StatusWith<OpMsgRequest> createX509AuthCmd(const BSONObj& params, StringData cli
return OpMsgRequest::fromDBAndBody(db.getValue(),
BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user"
- << username));
+ << "user" << username));
}
// Use the MONGODB-X509 protocol to authenticate as "username." The certificate details
@@ -241,14 +240,11 @@ BSONObj getInternalAuthParams(size_t idx, const std::string& mechanism) {
internalSecurity.user->getName().getUser().toString(), password);
}
- return BSON(saslCommandMechanismFieldName << mechanism << saslCommandUserDBFieldName
- << internalSecurity.user->getName().getDB()
- << saslCommandUserFieldName
- << internalSecurity.user->getName().getUser()
- << saslCommandPasswordFieldName
- << password
- << saslCommandDigestPasswordFieldName
- << false);
+ return BSON(saslCommandMechanismFieldName
+ << mechanism << saslCommandUserDBFieldName
+ << internalSecurity.user->getName().getDB() << saslCommandUserFieldName
+ << internalSecurity.user->getName().getUser() << saslCommandPasswordFieldName
+ << password << saslCommandDigestPasswordFieldName << false);
}
Future<std::string> negotiateSaslMechanism(RunCommandHook runCommand,
@@ -313,14 +309,10 @@ BSONObj buildAuthParams(StringData dbname,
StringData username,
StringData passwordText,
bool digestPassword) {
- return BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
- << dbname
- << saslCommandUserFieldName
- << username
- << saslCommandPasswordFieldName
- << passwordText
- << saslCommandDigestPasswordFieldName
- << digestPassword);
+ return BSON(saslCommandMechanismFieldName
+ << "SCRAM-SHA-1" << saslCommandUserDBFieldName << dbname << saslCommandUserFieldName
+ << username << saslCommandPasswordFieldName << passwordText
+ << saslCommandDigestPasswordFieldName << digestPassword);
}
StringData getSaslCommandUserDBFieldName() {
diff --git a/src/mongo/client/authenticate_test.cpp b/src/mongo/client/authenticate_test.cpp
index c7d477b36c0..c72f6ddba04 100644
--- a/src/mongo/client/authenticate_test.cpp
+++ b/src/mongo/client/authenticate_test.cpp
@@ -127,11 +127,7 @@ public:
<< "MONGODB-CR"
<< "db"
<< "admin"
- << "user"
- << _username
- << "pwd"
- << _password
- << "digest"
+ << "user" << _username << "pwd" << _password << "digest"
<< "true");
}
@@ -141,8 +137,7 @@ public:
pushRequest("$external",
BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user"
- << _username));
+ << "user" << _username));
// 2. Client receives 'ok'
pushResponse(BSON("ok" << 1));
@@ -152,8 +147,7 @@ public:
<< "MONGODB-X509"
<< "db"
<< "$external"
- << "user"
- << _username);
+ << "user" << _username);
}
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index 81e3bdaef4f..14b3f8f08ff 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -110,4 +110,4 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
MONGO_UNREACHABLE;
}
-} // namepspace mongo
+} // namespace mongo
diff --git a/src/mongo/client/constants.h b/src/mongo/client/constants.h
index c96076c84ef..5f282e0da7f 100644
--- a/src/mongo/client/constants.h
+++ b/src/mongo/client/constants.h
@@ -49,4 +49,4 @@ enum ResultFlagType {
*/
ResultFlag_AwaitCapable = 8
};
-}
+} // namespace mongo
diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp
index e6ddd6e3a7f..85c8c575ca1 100644
--- a/src/mongo/client/cyrus_sasl_client_session.cpp
+++ b/src/mongo/client/cyrus_sasl_client_session.cpp
@@ -147,8 +147,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CyrusSaslClientContext,
if (result != SASL_OK) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Could not initialize sasl client components ("
- << sasl_errstring(result, nullptr, nullptr)
- << ")");
+ << sasl_errstring(result, nullptr, nullptr) << ")");
}
SaslClientSession::create = createCyrusSaslClientSession;
@@ -311,4 +310,4 @@ Status CyrusSaslClientSession::step(StringData inputData, std::string* outputDat
return Status(ErrorCodes::ProtocolError, sasl_errdetail(_saslConnection));
}
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 3c896123850..893c4e0fab7 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -73,11 +73,11 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
using executor::RemoteCommandRequest;
@@ -221,23 +221,16 @@ std::pair<rpc::UniqueReply, DBClientBase*> DBClientBase::runCommandWithTarget(
// more helpful error message. Note that call() can itself throw a socket exception.
uassert(ErrorCodes::HostUnreachable,
str::stream() << "network error while attempting to run "
- << "command '"
- << request.getCommandName()
- << "' "
- << "on host '"
- << host
- << "' ",
+ << "command '" << request.getCommandName() << "' "
+ << "on host '" << host << "' ",
call(requestMsg, replyMsg, false, &host));
auto commandReply = parseCommandReplyMessage(host, replyMsg);
uassert(ErrorCodes::RPCProtocolNegotiationFailed,
str::stream() << "Mismatched RPC protocols - request was '"
- << networkOpToString(requestMsg.operation())
- << "' '"
- << " but reply was '"
- << networkOpToString(replyMsg.operation())
- << "' ",
+ << networkOpToString(requestMsg.operation()) << "' '"
+ << " but reply was '" << networkOpToString(replyMsg.operation()) << "' ",
rpc::protocolForMessage(requestMsg) == commandReply->getProtocol());
return {std::move(commandReply), this};
@@ -314,8 +307,7 @@ bool DBClientBase::runPseudoCommand(StringData db,
if (status == ErrorCodes::CommandResultSchemaViolation) {
msgasserted(28624,
str::stream() << "Received bad " << realCommandName
- << " response from server: "
- << info);
+ << " response from server: " << info);
} else if (status == ErrorCodes::CommandNotFound) {
NamespaceString pseudoCommandNss(db, pseudoCommandCol);
// if this throws we just let it escape as that's how runCommand works.
@@ -614,10 +606,7 @@ void DBClientBase::findN(vector<BSONObj>& out,
uassert(10276,
str::stream() << "DBClientBase::findN: transport error: " << getServerAddress()
- << " ns: "
- << ns
- << " query: "
- << query.toString(),
+ << " ns: " << ns << " query: " << query.toString(),
c.get());
if (c->hasResultFlag(ResultFlag_ShardConfigStale)) {
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index a7cdfc43c3b..d4d5bc315e0 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -123,7 +123,7 @@ public:
/** query N objects from the database into an array. makes sense mostly when you want a small
* number of results. if a huge number, use query() and iterate the cursor.
- */
+ */
void findN(std::vector<BSONObj>& out,
const std::string& ns,
Query query,
@@ -293,9 +293,9 @@ public:
int options = 0);
/**
- * Authenticates to another cluster member using appropriate authentication data.
- * @return true if the authentication was successful
- */
+ * Authenticates to another cluster member using appropriate authentication data.
+ * @return true if the authentication was successful
+ */
virtual Status authenticateInternalUser();
/**
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 8477a5edae9..bc65eb35938 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -80,10 +80,10 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::map;
using std::string;
+using std::unique_ptr;
MONGO_FAIL_POINT_DEFINE(dbClientConnectionDisableChecksum);
@@ -109,8 +109,8 @@ private:
};
/**
-* Initializes the wire version of conn, and returns the isMaster reply.
-*/
+ * Initializes the wire version of conn, and returns the isMaster reply.
+ */
executor::RemoteCommandResponse initWireVersion(DBClientConnection* conn,
StringData applicationName,
const MongoURI& uri,
@@ -327,8 +327,7 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
if (!sws.isOK()) {
return Status(ErrorCodes::HostUnreachable,
str::stream() << "couldn't connect to server " << _serverAddress.toString()
- << ", connection attempt failed: "
- << sws.getStatus());
+ << ", connection attempt failed: " << sws.getStatus());
}
{
@@ -622,9 +621,7 @@ bool DBClientConnection::call(Message& toSend,
if (assertOk)
uasserted(10278,
str::stream() << "dbclient error communicating with server "
- << getServerAddress()
- << ": "
- << redact(errStatus));
+ << getServerAddress() << ": " << redact(errStatus));
return false;
};
@@ -672,7 +669,7 @@ void DBClientConnection::checkResponse(const std::vector<BSONObj>& batch,
string* host) {
/* check for errors. the only one we really care about at
* this stage is "not master"
- */
+ */
*retry = false;
*host = _serverAddress.toString();
@@ -701,8 +698,7 @@ void DBClientConnection::handleNotMasterResponse(const BSONObj& replyBody,
monitor->failedHost(_serverAddress,
{ErrorCodes::NotMaster,
str::stream() << "got not master from: " << _serverAddress
- << " of repl set: "
- << _parentReplSetName});
+ << " of repl set: " << _parentReplSetName});
}
_markFailed(kSetFlag);
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 16be475d4b0..a49267ad152 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -60,9 +60,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -256,7 +256,7 @@ void DBClientCursor::requestMore() {
invariant(_scopedHost.size());
DBClientBase::withConnection_do_not_use(_scopedHost, [&](DBClientBase* conn) {
- ON_BLOCK_EXIT([&, origClient = _client ] { _client = origClient; });
+ ON_BLOCK_EXIT([&, origClient = _client] { _client = origClient; });
_client = conn;
doRequestMore();
});
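
Note: requestMore() uses ON_BLOCK_EXIT to restore _client once the borrowed connection is returned; the reformat only drops the space inside the capture list ([&, origClient = _client]). A standalone sketch of the same restore-on-exit idiom (ScopeGuard here is a stand-in, not the real macro):

    #include <utility>

    // Minimal scope guard in the spirit of ON_BLOCK_EXIT: runs a callable
    // when the enclosing scope unwinds, normally or by exception.
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : _f(std::move(f)) {}
        ~ScopeGuard() {
            _f();
        }

    private:
        F _f;
    };

    struct Conn {};

    struct Cursor {
        Conn* _client = nullptr;

        void requestMore(Conn* conn) {
            // Init-capture the old pointer; restore it when the scope exits.
            ScopeGuard guard([&, origClient = _client] { _client = origClient; });
            _client = conn;
            // ... issue the getMore through 'conn' ...
        }
    };
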
diff --git a/src/mongo/client/dbclient_cursor_test.cpp b/src/mongo/client/dbclient_cursor_test.cpp
index 292ce2c8bb5..234c68ce4af 100644
--- a/src/mongo/client/dbclient_cursor_test.cpp
+++ b/src/mongo/client/dbclient_cursor_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/client/dbclient_cursor.h"
#include "mongo/client/dbclient_connection.h"
+#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index b7e2044f93c..978cef3182e 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -49,12 +49,12 @@
namespace mongo {
-using std::shared_ptr;
-using std::unique_ptr;
using std::endl;
using std::map;
using std::set;
+using std::shared_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -314,9 +314,9 @@ DBClientConnection* DBClientReplicaSet::checkMaster() {
}
if (newConn == nullptr || !errmsg.empty()) {
- const std::string message = str::stream() << "can't connect to new replica set master ["
- << _masterHost.toString() << "]"
- << (errmsg.empty() ? "" : ", err: ") << errmsg;
+ const std::string message = str::stream()
+ << "can't connect to new replica set master [" << _masterHost.toString() << "]"
+ << (errmsg.empty() ? "" : ", err: ") << errmsg;
monitor->failedHost(_masterHost, {ErrorCodes::Error(40659), message});
uasserted(ErrorCodes::FailedToSatisfyReadPreference, message);
}
@@ -537,9 +537,9 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -589,9 +589,9 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -719,7 +719,7 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
return _master.get();
}
- auto dtor = [host = _lastSlaveOkHost.toString()](DBClientConnection * ptr) {
+ auto dtor = [host = _lastSlaveOkHost.toString()](DBClientConnection* ptr) {
globalConnPool.release(host, ptr);
};
@@ -768,9 +768,9 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -882,8 +882,9 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
// query could potentially go to a secondary, so see if this is an error (or empty) and
// retry if we're not past our retry limit.
- if (networkError || (hasErrField(dataObj) && !dataObj["code"].eoo() &&
- dataObj["code"].Int() == ErrorCodes::NotMasterOrSecondary)) {
+ if (networkError ||
+ (hasErrField(dataObj) && !dataObj["code"].eoo() &&
+ dataObj["code"].Int() == ErrorCodes::NotMasterOrSecondary)) {
if (_lazyState._lastClient == _lastSlaveOkConn.get()) {
isntSecondary();
} else if (_lazyState._lastClient == _master.get()) {
@@ -905,8 +906,9 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
} else if (_lazyState._lastOp == dbQuery) {
// if query could not potentially go to a secondary, just mark the master as bad
- if (networkError || (hasErrField(dataObj) && !dataObj["code"].eoo() &&
- dataObj["code"].Int() == ErrorCodes::NotMasterNoSlaveOk)) {
+ if (networkError ||
+ (hasErrField(dataObj) && !dataObj["code"].eoo() &&
+ dataObj["code"].Int() == ErrorCodes::NotMasterNoSlaveOk)) {
if (_lazyState._lastClient == _master.get()) {
isntMaster();
}
@@ -957,8 +959,7 @@ std::pair<rpc::UniqueReply, DBClientBase*> DBClientReplicaSet::runCommandWithTar
uasserted(ErrorCodes::HostNotFound,
str::stream() << "Could not satisfy $readPreference of '" << readPref.toString()
- << "' while attempting to run command "
- << request.getCommandName());
+ << "' while attempting to run command " << request.getCommandName());
}
std::pair<rpc::UniqueReply, std::shared_ptr<DBClientBase>> DBClientReplicaSet::runCommandWithTarget(
@@ -999,9 +1000,9 @@ bool DBClientReplicaSet::call(Message& toSend,
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
for (size_t retry = 0; retry < MAX_RETRY; retry++) {
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 92e2a93acb0..1ab59966676 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -57,8 +57,8 @@ typedef std::shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
class DBClientReplicaSet : public DBClientBase {
public:
using DBClientBase::query;
- using DBClientBase::update;
using DBClientBase::remove;
+ using DBClientBase::update;
/** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet
* connections. */
@@ -244,7 +244,7 @@ public:
protected:
/** Authorize. Authorizes all nodes as needed
- */
+ */
void _auth(const BSONObj& params) override;
private:
@@ -353,4 +353,4 @@ protected:
} _lazyState;
};
-}
+} // namespace mongo
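
Note: bare closing braces on namespaces are rewritten to carry a trailing comment, matching clang-format's FixNamespaceComments behavior (an inference; the rewrite is mechanical either way):

    namespace mongo {

    // ... declarations ...

    }  // namespace mongo
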
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 8e707c0ce02..8843227ded5 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -75,13 +75,12 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName
- << "' field: "
- << obj);
+ << "' field: " << obj);
}
if (!cursorElement.isABSONObj()) {
- return Status(
- ErrorCodes::FailedToParse,
- str::stream() << "'" << kCursorFieldName << "' field must be an object: " << obj);
+ return Status(ErrorCodes::FailedToParse,
+ str::stream()
+ << "'" << kCursorFieldName << "' field must be an object: " << obj);
}
BSONObj cursorObj = cursorElement.Obj();
@@ -89,17 +88,13 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorIdElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << kCursorIdFieldName
- << "' field: "
- << obj);
+ << kCursorIdFieldName << "' field: " << obj);
}
if (cursorIdElement.type() != mongo::NumberLong) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kCursorIdFieldName
<< "' field must be a 'long' but was a '"
- << typeName(cursorIdElement.type())
- << "': "
- << obj);
+ << typeName(cursorIdElement.type()) << "': " << obj);
}
batchData->cursorId = cursorIdElement.numberLong();
@@ -107,25 +102,19 @@ Status parseCursorResponse(const BSONObj& obj,
if (namespaceElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain "
- << "'"
- << kCursorFieldName
- << "."
- << kNamespaceFieldName
- << "' field: "
- << obj);
+ << "'" << kCursorFieldName << "." << kNamespaceFieldName
+ << "' field: " << obj);
}
if (namespaceElement.type() != mongo::String) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' field must be a string: "
- << obj);
+ << "' field must be a string: " << obj);
}
const NamespaceString tempNss(namespaceElement.valueStringData());
if (!tempNss.isValid()) {
return Status(ErrorCodes::BadValue,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' contains an invalid namespace: "
- << obj);
+ << "' contains an invalid namespace: " << obj);
}
batchData->nss = tempNss;
@@ -133,27 +122,20 @@ Status parseCursorResponse(const BSONObj& obj,
if (batchElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << batchFieldName
- << "' field: "
- << obj);
+ << batchFieldName << "' field: " << obj);
}
if (!batchElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << batchFieldName
- << "' field must be an array: "
- << obj);
+ << "' field must be an array: " << obj);
}
BSONObj batchObj = batchElement.Obj();
for (auto itemElement : batchObj) {
if (!itemElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "found non-object " << itemElement << " in "
- << "'"
- << kCursorFieldName
- << "."
- << batchFieldName
- << "' field: "
- << obj);
+ << "'" << kCursorFieldName << "." << batchFieldName
+ << "' field: " << obj);
}
batchData->documents.push_back(itemElement.Obj());
}
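
Note: parseCursorResponse validates one field at a time and returns a FailedToParse status that names the offending field and echoes the whole input object. A std-only sketch of that shape (Status and Element are stand-ins, not the mongo types):

    #include <string>

    struct Status {
        bool ok;
        std::string reason;
    };

    struct Element {
        bool missing = true;
        bool isObject = false;
    };

    Status checkObjectField(const Element& elem, const std::string& name, const std::string& raw) {
        if (elem.missing) {
            return {false, "cursor response must contain '" + name + "' field: " + raw};
        }
        if (!elem.isObject) {
            return {false, "'" + name + "' field must be an object: " + raw};
        }
        return {true, ""};
    }
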
diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp
index 19713755b11..f0fa26cf7b3 100644
--- a/src/mongo/client/fetcher_test.cpp
+++ b/src/mongo/client/fetcher_test.cpp
@@ -405,8 +405,7 @@ TEST_F(FetcherTest, FindCommandFailed2) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "bad hint"
- << "code"
- << int(ErrorCodes::BadValue)),
+ << "code" << int(ErrorCodes::BadValue)),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -432,10 +431,8 @@ TEST_F(FetcherTest, CursorIdFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -446,10 +443,8 @@ TEST_F(FetcherTest, CursorIdNotLongNumber) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123.1 << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -469,11 +464,11 @@ TEST_F(FetcherTest, NamespaceFieldMissing) {
TEST_F(FetcherTest, NamespaceNotAString) {
ASSERT_OK(fetcher->schedule());
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 123LL << "ns" << 123 << "firstBatch" << BSONArray()) << "ok"
- << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor"
+ << BSON("id" << 123LL << "ns" << 123 << "firstBatch" << BSONArray())
+ << "ok" << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_STRING_CONTAINS(status.reason(), "'cursor.ns' field must be a string");
}
@@ -482,10 +477,8 @@ TEST_F(FetcherTest, NamespaceEmpty) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< ""
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -496,10 +489,8 @@ TEST_F(FetcherTest, NamespaceMissingCollectionName) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< "db."
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -510,8 +501,7 @@ TEST_F(FetcherTest, FirstBatchFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll")
- << "ok"
- << 1),
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -522,10 +512,8 @@ TEST_F(FetcherTest, FirstBatchNotAnArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << 123)
- << "ok"
- << 1),
+ << "firstBatch" << 123)
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -536,10 +524,8 @@ TEST_F(FetcherTest, FirstBatchArrayContainsNonObject) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(8))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(8))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -551,10 +537,8 @@ TEST_F(FetcherTest, FirstBatchEmptyArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -568,10 +552,8 @@ TEST_F(FetcherTest, FetchOneDocument) {
const BSONObj doc = BSON("_id" << 1);
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -596,10 +578,8 @@ TEST_F(FetcherTest, SetNextActionToContinueWhenNextBatchIsNotAvailable) {
};
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -629,10 +609,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
Milliseconds(100),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -650,10 +628,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
Milliseconds(200),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -671,10 +647,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc3))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc3))
+ << "ok" << 1),
Milliseconds(300),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
@@ -698,10 +672,8 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -715,10 +687,8 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
const BSONObj doc2 = BSON("_id" << 2);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -761,10 +731,8 @@ TEST_F(FetcherTest, CancelDuringCallbackPutsFetcherInShutdown) {
const BSONObj doc = BSON("_id" << 1);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -782,10 +750,8 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -800,10 +766,8 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -839,10 +803,8 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -896,10 +858,8 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -916,10 +876,8 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -993,10 +951,8 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -1016,10 +972,8 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
@@ -1059,10 +1013,8 @@ TEST_F(FetcherTest, FetcherAppliesRetryPolicyToFirstCommandButNotToGetMoreReques
processNetworkResponse(rs, ReadyQueueState::kHasReadyRequests, FetcherState::kActive);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
ASSERT_OK(status);
@@ -1110,10 +1062,8 @@ TEST_F(FetcherTest, FetcherResetsInternalFinishCallbackFunctionPointerAfterLastC
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
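
Note: every fetcher test builds its cursor reply inline with the BSON()/BSON_ARRAY macros, and the reformat packs key/value pairs onto shared lines except where a bare string literal (e.g. "db.coll") still forces its own line. A helper with the post-reformat shape (include path approximate):

    #include "mongo/db/jsobj.h"  // include path approximate

    mongo::BSONObj makeCursorReply(long long cursorId, const mongo::BSONObj& doc) {
        return BSON("cursor" << BSON("id" << cursorId << "ns"
                                          << "db.coll"
                                          << "firstBatch" << BSON_ARRAY(doc))
                             << "ok" << 1);
    }
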
diff --git a/src/mongo/client/mongo_uri.cpp b/src/mongo/client/mongo_uri.cpp
index e11f9b0a06f..433c33c8e61 100644
--- a/src/mongo/client/mongo_uri.cpp
+++ b/src/mongo/client/mongo_uri.cpp
@@ -169,8 +169,7 @@ MongoURI::OptionsMap parseOptions(StringData options, StringData url) {
if (opt.empty()) {
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << "Missing a key/value pair in the options for mongodb:// URL: "
- << url);
+ << "Missing a key/value pair in the options for mongodb:// URL: " << url);
}
const auto kvPair = partitionForward(opt, '=');
@@ -190,8 +189,7 @@ MongoURI::OptionsMap parseOptions(StringData options, StringData url) {
if (valRaw.empty()) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "Missing value for key '" << keyRaw
- << "' in the options for mongodb:// URL: "
- << url);
+ << "' in the options for mongodb:// URL: " << url);
}
const auto val = uassertStatusOKWithContext(
uriDecode(valRaw),
@@ -259,8 +257,7 @@ URIParts::URIParts(StringData uri) {
if (schemeEnd == std::string::npos) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "URI must begin with " << kURIPrefix << " or " << kURISRVPrefix
- << ": "
- << uri);
+ << ": " << uri);
}
const auto uriWithoutPrefix = uri.substr(schemeEnd + 3);
scheme = uri.substr(0, schemeEnd);
@@ -380,10 +377,10 @@ MongoURI MongoURI::parseImpl(const std::string& url) {
}
if ((host.find('/') != std::string::npos) && !StringData(host).endsWith(".sock")) {
- uasserted(
- ErrorCodes::FailedToParse,
- str::stream() << "'" << host << "' in '" << url
- << "' appears to be a unix socket, but does not end in '.sock'");
+ uasserted(ErrorCodes::FailedToParse,
+ str::stream()
+ << "'" << host << "' in '" << url
+ << "' appears to be a unix socket, but does not end in '.sock'");
}
servers.push_back(uassertStatusOK(HostAndPort::parse(host)));
diff --git a/src/mongo/client/mongo_uri_test.cpp b/src/mongo/client/mongo_uri_test.cpp
index 729d04d8d83..6aa3e4fc5b3 100644
--- a/src/mongo/client/mongo_uri_test.cpp
+++ b/src/mongo/client/mongo_uri_test.cpp
@@ -828,7 +828,8 @@ TEST(MongoURI, srvRecordTest) {
{"localhost.sub.test.build.10gen.cc", 27017},
},
{
- {"ssl", "true"}, {"replicaSet", "repl0"},
+ {"ssl", "true"},
+ {"replicaSet", "repl0"},
},
success},
@@ -842,7 +843,8 @@ TEST(MongoURI, srvRecordTest) {
{"localhost.sub.test.build.10gen.cc", 27017},
},
{
- {"ssl", "true"}, {"replicaSet", "repl0"},
+ {"ssl", "true"},
+ {"replicaSet", "repl0"},
},
success},
@@ -988,19 +990,19 @@ TEST(MongoURI, srvRecordTest) {
for (const auto& test : tests) {
auto rs = MongoURI::parse(test.uri);
if (test.expectation == failure) {
- ASSERT_FALSE(rs.getStatus().isOK()) << "Failing URI: " << test.uri
- << " data on line: " << test.lineNumber;
+ ASSERT_FALSE(rs.getStatus().isOK())
+ << "Failing URI: " << test.uri << " data on line: " << test.lineNumber;
continue;
}
ASSERT_OK(rs.getStatus()) << "Failed on URI: " << test.uri
<< " data on line: " << test.lineNumber;
auto rv = rs.getValue();
- ASSERT_EQ(rv.getUser(), test.user) << "Failed on URI: " << test.uri
- << " data on line: " << test.lineNumber;
- ASSERT_EQ(rv.getPassword(), test.password) << "Failed on URI: " << test.uri
- << " data on line : " << test.lineNumber;
- ASSERT_EQ(rv.getDatabase(), test.database) << "Failed on URI: " << test.uri
- << " data on line : " << test.lineNumber;
+ ASSERT_EQ(rv.getUser(), test.user)
+ << "Failed on URI: " << test.uri << " data on line: " << test.lineNumber;
+ ASSERT_EQ(rv.getPassword(), test.password)
+ << "Failed on URI: " << test.uri << " data on line : " << test.lineNumber;
+ ASSERT_EQ(rv.getDatabase(), test.database)
+ << "Failed on URI: " << test.uri << " data on line : " << test.lineNumber;
compareOptions(test.lineNumber, test.uri, rv.getOptions(), test.options);
std::vector<HostAndPort> hosts(begin(rv.getServers()), end(rv.getServers()));
@@ -1009,9 +1011,9 @@ TEST(MongoURI, srvRecordTest) {
std::sort(begin(expectedHosts), end(expectedHosts));
for (std::size_t i = 0; i < std::min(hosts.size(), expectedHosts.size()); ++i) {
- ASSERT_EQ(hosts[i], expectedHosts[i]) << "Failed on URI: " << test.uri
- << " at host number" << i
- << " data on line: " << test.lineNumber;
+ ASSERT_EQ(hosts[i], expectedHosts[i])
+ << "Failed on URI: " << test.uri << " at host number" << i
+ << " data on line: " << test.lineNumber;
}
ASSERT_TRUE(hosts.size() == expectedHosts.size())
<< "Failed on URI: " << test.uri << " Found " << hosts.size() << " hosts, expected "
diff --git a/src/mongo/client/native_sasl_client_session.cpp b/src/mongo/client/native_sasl_client_session.cpp
index b7f51fa0f68..3fa871bda5d 100644
--- a/src/mongo/client/native_sasl_client_session.cpp
+++ b/src/mongo/client/native_sasl_client_session.cpp
@@ -98,4 +98,4 @@ Status NativeSaslClientSession::step(StringData inputData, std::string* outputDa
}
return status.getStatus();
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/client/query_spec.h b/src/mongo/client/query_spec.h
index c115f50cd70..534a841842f 100644
--- a/src/mongo/client/query_spec.h
+++ b/src/mongo/client/query_spec.h
@@ -116,11 +116,7 @@ public:
std::string toString() const {
return str::stream() << "QSpec "
<< BSON("ns" << _ns << "n2skip" << _ntoskip << "n2return" << _ntoreturn
- << "options"
- << _options
- << "query"
- << _query
- << "fields"
+ << "options" << _options << "query" << _query << "fields"
<< _fields);
}
};
diff --git a/src/mongo/client/read_preference.cpp b/src/mongo/client/read_preference.cpp
index 4391db7c388..b71825ad395 100644
--- a/src/mongo/client/read_preference.cpp
+++ b/src/mongo/client/read_preference.cpp
@@ -86,16 +86,9 @@ StatusWith<ReadPreference> parseReadPreferenceMode(StringData prefStr) {
}
return Status(ErrorCodes::FailedToParse,
str::stream() << "Could not parse $readPreference mode '" << prefStr
- << "'. Only the modes '"
- << kPrimaryOnly
- << "', '"
- << kPrimaryPreferred
- << "', '"
- << kSecondaryOnly
- << "', '"
- << kSecondaryPreferred
- << "', and '"
- << kNearest
+ << "'. Only the modes '" << kPrimaryOnly << "', '"
+ << kPrimaryPreferred << "', '" << kSecondaryOnly << "', '"
+ << kSecondaryPreferred << "', and '" << kNearest
<< "' are supported.");
}
@@ -206,8 +199,8 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (maxStalenessSecondsValue && maxStalenessSecondsValue < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << kMaxStalenessSecondsFieldName
- << " must be a non-negative integer");
+ str::stream()
+ << kMaxStalenessSecondsFieldName << " must be a non-negative integer");
}
if (maxStalenessSecondsValue && maxStalenessSecondsValue >= Seconds::max().count()) {
@@ -218,9 +211,9 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (maxStalenessSecondsValue && maxStalenessSecondsValue < kMinimalMaxStalenessValue.count()) {
return Status(ErrorCodes::MaxStalenessOutOfRange,
- str::stream() << kMaxStalenessSecondsFieldName
- << " value can not be less than "
- << kMinimalMaxStalenessValue.count());
+ str::stream()
+ << kMaxStalenessSecondsFieldName << " value can not be less than "
+ << kMinimalMaxStalenessValue.count());
}
if ((mode == ReadPreference::PrimaryOnly) && maxStalenessSecondsValue) {
@@ -236,9 +229,7 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (elem.type() != mongo::Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "$readPreference has incorrect type: expected "
- << mongo::Object
- << " but got "
- << elem.type());
+ << mongo::Object << " but got " << elem.type());
}
return fromInnerBSON(elem.Obj());
}
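
Note: parseReadPreferenceMode maps the five wire-format mode strings onto an enum and otherwise reports a FailedToParse status listing every accepted mode. The same shape with std::optional in place of StatusWith:

    #include <optional>
    #include <string>

    enum class ReadPref { PrimaryOnly, PrimaryPreferred, SecondaryOnly, SecondaryPreferred, Nearest };

    // Returns std::nullopt where the real code returns a FailedToParse
    // Status enumerating the supported modes.
    std::optional<ReadPref> parseMode(const std::string& s) {
        if (s == "primary")
            return ReadPref::PrimaryOnly;
        if (s == "primaryPreferred")
            return ReadPref::PrimaryPreferred;
        if (s == "secondary")
            return ReadPref::SecondaryOnly;
        if (s == "secondaryPreferred")
            return ReadPref::SecondaryPreferred;
        if (s == "nearest")
            return ReadPref::Nearest;
        return std::nullopt;
    }
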
diff --git a/src/mongo/client/read_preference_test.cpp b/src/mongo/client/read_preference_test.cpp
index f3a0dc78941..92bc2516be0 100644
--- a/src/mongo/client/read_preference_test.cpp
+++ b/src/mongo/client/read_preference_test.cpp
@@ -55,8 +55,7 @@ TEST(ReadPreferenceSetting, ParseValid) {
// that the tags are parsed as the empty TagSet.
checkParse(BSON("mode"
<< "primary"
- << "tags"
- << BSON_ARRAY(BSONObj())),
+ << "tags" << BSON_ARRAY(BSONObj())),
ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly()));
checkParse(BSON("mode"
@@ -69,14 +68,12 @@ TEST(ReadPreferenceSetting, ParseValid) {
<< "ny")))));
checkParse(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()),
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()),
ReadPreferenceSetting(ReadPreference::SecondaryOnly, kMinMaxStaleness));
checkParse(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << 0),
+ << "maxStalenessSeconds" << 0),
ReadPreferenceSetting(ReadPreference::SecondaryOnly, Seconds(0)));
checkParse(BSON("mode"
@@ -84,8 +81,7 @@ TEST(ReadPreferenceSetting, ParseValid) {
<< "tags"
<< BSON_ARRAY(BSON("dc"
<< "ny"))
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()),
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()),
ReadPreferenceSetting(ReadPreference::SecondaryOnly,
TagSet(BSON_ARRAY(BSON("dc"
<< "ny"))),
@@ -149,8 +145,7 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds is negative
checkParseFails(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << -1));
+ << "maxStalenessSeconds" << -1));
// maxStalenessSeconds is NaN
checkParseFails(BSON("mode"
@@ -161,8 +156,7 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds and primary
checkParseFails(BSON("mode"
<< "primary"
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()));
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()));
// maxStalenessSeconds is less than min
checkParseFailsWithError(BSON("mode"
@@ -174,13 +168,11 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds is greater than max
checkParseFails(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << Seconds::max().count()));
+ << "maxStalenessSeconds" << Seconds::max().count()));
checkParseContainerFailsWithError(BSON("$query" << BSON("pang"
<< "pong")
- << "$readPreference"
- << 2),
+ << "$readPreference" << 2),
ErrorCodes::TypeMismatch);
}
diff --git a/src/mongo/client/remote_command_retry_scheduler_test.cpp b/src/mongo/client/remote_command_retry_scheduler_test.cpp
index 6a543bc76f7..7940be98674 100644
--- a/src/mongo/client/remote_command_retry_scheduler_test.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler_test.cpp
@@ -401,8 +401,7 @@ TEST_F(RemoteCommandRetrySchedulerTest, SchedulerIgnoresEmbeddedErrorInSuccessfu
// wire protocol.
ResponseStatus response(BSON("ok" << 0 << "code" << int(ErrorCodes::FailedToParse) << "errmsg"
<< "injected error"
- << "z"
- << 456),
+ << "z" << 456),
Milliseconds(100));
processNetworkResponse(response);
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index aacbfe4aea3..37dee8b0766 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -57,9 +57,9 @@
namespace mongo {
-using std::shared_ptr;
using std::numeric_limits;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
@@ -254,7 +254,7 @@ void ReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy strategy)
nextScanTime = possibleNextScanTime;
LOG(1) << "Next replica set scan scheduled for " << nextScanTime;
auto swHandle = executor->scheduleWorkAt(
- nextScanTime, [ this, anchor = shared_from_this() ](const CallbackArgs& cbArgs) {
+ nextScanTime, [this, anchor = shared_from_this()](const CallbackArgs& cbArgs) {
if (!cbArgs.status.isOK())
return;
@@ -529,7 +529,7 @@ void Refresher::scheduleNetworkRequests() {
// ensure that the call to isMaster is scheduled at most every 500ms
auto swHandle = _set->executor->scheduleWorkAt(
node->nextPossibleIsMasterCall,
- [ *this, host = ns.host ](const CallbackArgs& cbArgs) mutable {
+ [*this, host = ns.host](const CallbackArgs& cbArgs) mutable {
if (!cbArgs.status.isOK()) {
return;
}
@@ -584,7 +584,7 @@ void Refresher::scheduleIsMaster(const HostAndPort& host) {
_set->executor
->scheduleRemoteCommand(
std::move(request),
- [ copy = *this, host, timer = Timer() ](
+ [copy = *this, host, timer = Timer()](
const executor::TaskExecutor::RemoteCommandCallbackArgs& result) mutable {
stdx::lock_guard lk(copy._set->mutex);
// Ignore the reply and return if we are no longer the current scan. This might
@@ -732,8 +732,7 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
failedHost(from,
{ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Target replica set name " << reply.setName
- << " does not match the monitored set name "
- << _set->name});
+ << " does not match the monitored set name " << _set->name});
return;
}
@@ -811,12 +810,11 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// Reject if config version is older. This is for backwards compatibility with nodes in pv0
// since they don't have the same ordering with pv1 electionId.
if (reply.configVersion < _set->configVersion) {
- return {ErrorCodes::NotMaster,
- str::stream() << "Node " << from
- << " believes it is primary, but its config version "
- << reply.configVersion
- << " is older than the most recent config version "
- << _set->configVersion};
+ return {
+ ErrorCodes::NotMaster,
+ str::stream() << "Node " << from << " believes it is primary, but its config version "
+ << reply.configVersion << " is older than the most recent config version "
+ << _set->configVersion};
}
if (reply.electionId.isSet()) {
@@ -825,12 +823,11 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// because configVersion needs to be incremented whenever the protocol version is changed.
if (reply.configVersion == _set->configVersion && _set->maxElectionId.isSet() &&
_set->maxElectionId.compare(reply.electionId) > 0) {
- return {ErrorCodes::NotMaster,
- str::stream() << "Node " << from
- << " believes it is primary, but its election id "
- << reply.electionId
- << " is older than the most recent election id "
- << _set->maxElectionId};
+ return {
+ ErrorCodes::NotMaster,
+ str::stream() << "Node " << from << " believes it is primary, but its election id "
+ << reply.electionId << " is older than the most recent election id "
+ << _set->maxElectionId};
}
_set->maxElectionId = reply.electionId;
@@ -1320,9 +1317,7 @@ void SetState::notify(bool finishedScan) {
Status SetState::makeUnsatisfedReadPrefError(const ReadPreferenceSetting& criteria) const {
return Status(ErrorCodes::FailedToSatisfyReadPreference,
str::stream() << "Could not find host matching read preference "
- << criteria.toString()
- << " for set "
- << name);
+ << criteria.toString() << " for set " << name);
}
void SetState::init() {
@@ -1415,4 +1410,4 @@ void ScanState::retryAllTriedHosts(PseudoRandom& rand) {
std::shuffle(hostsToScan.begin(), hostsToScan.end(), rand.urbg());
triedHosts = waitingFor;
}
-}
+} // namespace mongo
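
Note: rescheduleRefresh() and the scan callbacks capture 'this' together with an anchor shared_ptr so the SetState outlives any queued work; the reformat only tightens the capture-list spacing from [ this, anchor = ... ] to [this, anchor = ...]. The keep-alive idiom in isolation:

    #include <functional>
    #include <memory>
    #include <vector>

    struct SetState : std::enable_shared_from_this<SetState> {
        std::vector<std::function<void()>> scheduled;  // stand-in for the executor

        void rescheduleRefresh() {
            // The init-captured anchor keeps *this alive until the callback
            // runs, even if every other owner releases the monitor first.
            scheduled.push_back([this, anchor = shared_from_this()] {
                // ... rescan the replica set via 'this' ...
            });
        }
    };

    // Note: shared_from_this() requires the object already be owned by a
    // shared_ptr, e.g. auto state = std::make_shared<SetState>();
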
diff --git a/src/mongo/client/replica_set_monitor_internal_test.cpp b/src/mongo/client/replica_set_monitor_internal_test.cpp
index b796dff516c..1a90deed113 100644
--- a/src/mongo/client/replica_set_monitor_internal_test.cpp
+++ b/src/mongo/client/replica_set_monitor_internal_test.cpp
@@ -367,22 +367,10 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSNotInitiated) {
BSONObj ismaster = BSON(
"ismaster" << false << "secondary" << false << "info"
<< "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"
- << "isreplicaset"
- << true
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "isreplicaset" << true << "maxBsonObjectSize" << 16777216
+ << "maxMessageSizeBytes" << 48000000 << "maxWriteBatchSize" << 1000
+ << "localTime" << mongo::jsTime() << "maxWireVersion" << 2 << "minWireVersion"
+ << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort(), -1, ismaster);
@@ -401,34 +389,15 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSNotInitiated) {
TEST_F(IsMasterReplyTest, IsMasterReplyRSPrimary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "primary"
+ << "setVersion" << 1 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3000"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3000"), -1, ismaster);
@@ -448,38 +417,16 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSPrimary) {
TEST_F(IsMasterReplyTest, IsMasterReplyPassiveSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "passives"
- << BSON_ARRAY("mongo.example:3001")
- << "primary"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "passives"
+ << BSON_ARRAY("mongo.example:3001") << "primary"
<< "mongo.example:3000"
- << "passive"
- << true
- << "me"
+ << "passive" << true << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -501,38 +448,15 @@ TEST_F(IsMasterReplyTest, IsMasterReplyPassiveSecondary) {
TEST_F(IsMasterReplyTest, IsMasterReplyHiddenSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "primary"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "primary"
<< "mongo.example:3000"
- << "passive"
- << true
- << "hidden"
- << true
- << "me"
+ << "passive" << true << "hidden" << true << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -552,40 +476,22 @@ TEST_F(IsMasterReplyTest, IsMasterReplyHiddenSecondary) {
TEST_F(IsMasterReplyTest, IsMasterSecondaryWithTags) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("mongo.example:3000"
<< "mongo.example:3001")
<< "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "tags"
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "tags"
<< BSON("dc"
<< "nyc"
<< "use"
<< "production")
- << "ok"
- << 1);
+ << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
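
Note: these fixtures repeat the same isMaster boilerplate with minor variations; a helper like the following (hypothetical — not part of this change) would shrink each test to the fields it actually exercises. Include paths approximate:

    #include "mongo/db/jsobj.h"           // include path approximate
    #include "mongo/util/time_support.h"  // for mongo::jsTime()

    mongo::BSONObj makeIsMasterReply(bool isMaster, const mongo::BSONArray& hosts) {
        return BSON("setName"
                    << "test"
                    << "ismaster" << isMaster << "secondary" << !isMaster << "hosts" << hosts
                    << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
                    << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
                    << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
    }
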
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index fbf4d3a08d2..94aa109ad26 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -52,15 +52,15 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
using executor::NetworkInterface;
using executor::NetworkInterfaceThreadPool;
-using executor::TaskExecutorPool;
using executor::TaskExecutor;
+using executor::TaskExecutorPool;
using executor::ThreadPoolTaskExecutor;
ReplicaSetMonitorManager::ReplicaSetMonitorManager() {}
@@ -98,7 +98,7 @@ namespace {
void uassertNotMixingSSL(transport::ConnectSSLMode a, transport::ConnectSSLMode b) {
uassert(51042, "Mixing ssl modes with a single replica set is disallowed", a == b);
}
-}
+} // namespace
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
const ConnectionString& connStr) {
@@ -165,7 +165,7 @@ void ReplicaSetMonitorManager::shutdown() {
}
log() << "Dropping all ongoing scans against replica sets";
- for (auto & [ name, monitor ] : monitors) {
+ for (auto& [name, monitor] : monitors) {
auto anchor = monitor.lock();
if (!anchor) {
continue;
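
Note: shutdown() iterates the monitor map with a structured binding; the reformat normalizes it to auto& [name, monitor] with no spaces inside the brackets. The lock-the-weak_ptr-then-skip pattern in isolation:

    #include <map>
    #include <memory>
    #include <string>

    struct Monitor {};

    void dropAllScans(std::map<std::string, std::weak_ptr<Monitor>>& monitors) {
        for (auto& [name, monitor] : monitors) {
            auto anchor = monitor.lock();
            if (!anchor) {
                continue;  // monitor already destroyed; nothing to stop
            }
            // ... stop this monitor's ongoing scan ...
            (void)name;
        }
    }
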
diff --git a/src/mongo/client/replica_set_monitor_scan_test.cpp b/src/mongo/client/replica_set_monitor_scan_test.cpp
index c75984cc985..80d5a576b4e 100644
--- a/src/mongo/client/replica_set_monitor_scan_test.cpp
+++ b/src/mongo/client/replica_set_monitor_scan_test.cpp
@@ -60,16 +60,12 @@ TEST_F(CoreScanTest, CheckAllSeedsSerial) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -116,16 +112,12 @@ TEST_F(CoreScanTest, CheckAllSeedsParallel) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
// Now all hosts have returned data
@@ -163,16 +155,11 @@ TEST_F(CoreScanTest, NoMasterInitAllUp) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -209,17 +196,12 @@ TEST_F(CoreScanTest, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
}
// Only look at "d" after exhausting all other hosts
@@ -230,17 +212,12 @@ TEST_F(CoreScanTest, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
ns = refresher.getNextStep();
@@ -289,10 +266,7 @@ TEST_F(CoreScanTest, MasterNotInSeeds_PrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "primary"
<< "d"
<< "hosts"
@@ -300,8 +274,7 @@ TEST_F(CoreScanTest, MasterNotInSeeds_PrimaryInIsMaster) {
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -346,14 +319,8 @@ TEST_F(CoreScanTest, SlavesUsableEvenIfNoMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("a")
- << "ok"
- << true));
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("a") << "ok" << true));
// Check intended conditions for entry to getNextStep().
ASSERT(state->currentScan->hostsToScan.empty());
@@ -398,16 +365,11 @@ TEST_F(CoreScanTest, MultipleMasterLastNodeWins) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
// Ensure the set primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -451,14 +413,9 @@ TEST_F(CoreScanTest, MasterIsSourceOfTruth) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << (primary ? primaryHosts : secondaryHosts)
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << (primary ? primaryHosts : secondaryHosts)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -505,14 +462,8 @@ TEST_F(CoreScanTest, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
- << hostsForSeed[i % 2]
- << "ok"
- << true));
+ << "ismaster" << true << "secondary" << false << "hosts"
+ << hostsForSeed[i % 2] << "ok" << true));
// Ensure the primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -541,14 +492,8 @@ TEST_F(CoreScanTest, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hostsForSeed[0]
- << "ok"
- << true));
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hostsForSeed[0] << "ok" << true));
// scan should be complete
ns = refresher.getNextStep();
@@ -595,16 +540,12 @@ TEST_F(CoreScanTest, GetMatchingDuringScan) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
bool hasPrimary = !(state->getMatchingHost(primaryOnly).empty());
bool hasSecondary = !(state->getMatchingHost(secondaryOnly).empty());
@@ -641,16 +582,12 @@ TEST_F(CoreScanTest, OutOfBandFailedHost) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
if (i >= 1) {
HostAndPort a("a");
@@ -698,18 +635,13 @@ TEST_F(CoreScanTest, NewPrimaryWithMaxElectionId) {
BSON(
"setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
<< "electionId"
<< OID::fromTerm(i) // electionId must increase every cycle.
- << "ok"
- << true));
+ << "ok" << true));
// Ensure the set primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -756,18 +688,13 @@ TEST_F(CoreScanTest, IgnoreElectionIdFromSecondaries) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "electionId"
- << (primary ? primaryElectionId : OID::gen())
- << "hosts"
+ << (primary ? primaryElectionId : OID::gen()) << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
// check that the SetState's maxElectionId == primary's electionId
@@ -801,20 +728,13 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << secondElectionId
+ << "ismaster" << true << "secondary" << false
+ << "setVersion" << 1 << "electionId" << secondElectionId
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -834,18 +754,12 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "electionId"
- << firstElectionId
- << "hosts"
+ << "ismaster" << true << "secondary" << false
+ << "electionId" << firstElectionId << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -867,16 +781,11 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -919,20 +828,13 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 1 << "electionId" << OID("7fffffff0000000000000001")
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
// check that the SetState's maxElectionId == primary's electionId
ASSERT_EQUALS(state->maxElectionId, OID("7fffffff0000000000000001"));
@@ -945,20 +847,12 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 2
- << "electionId"
- << primaryElectionId
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 2 << "electionId" << primaryElectionId << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -980,20 +874,12 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "electionId"
- << primaryElectionId
- << "setVersion"
- << 2
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "electionId"
+ << primaryElectionId << "setVersion" << 2 << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1003,20 +889,13 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 1 << "electionId" << OID("7fffffff0000000000000001")
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1049,19 +928,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (nonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1099,19 +971,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1150,20 +1015,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoPrimaryMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1204,20 +1062,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSAllFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1257,19 +1108,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSAllButPrimaryFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1309,19 +1153,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSOneSecondaryFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1361,20 +1198,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNonStaleSecondaryMatched) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1409,14 +1239,8 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoLastWrite) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1450,14 +1274,8 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSZeroNoLastWrite) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1496,17 +1314,11 @@ TEST_F(MinOpTimeTest, MinOpTimeMatched) {
bool isNonStale = ns.host.host() == "b";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("opTime" << (isNonStale ? opTimeNonStale.toBSON()
: opTimeStale.toBSON()))
- << "ok"
- << true);
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1541,17 +1353,11 @@ TEST_F(MinOpTimeTest, MinOpTimeNotMatched) {
bool isNonStale = ns.host.host() == "a";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("opTime" << (isNonStale ? opTimeNonStale.toBSON()
: opTimeStale.toBSON()))
- << "ok"
- << true);
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1588,20 +1394,13 @@ TEST_F(MinOpTimeTest, MinOpTimeIgnored) {
bool isNonStale = ns.host.host() == "c";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale || primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTimeStale.toBSON())
- << "ok"
- << true);
+ << "opTime" << opTimeStale.toBSON())
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1674,7 +1473,7 @@ public:
std::set<HostAndPort> members;
BSONArrayBuilder arrayBuilder;
- for (const auto & [ host, nodeState ] : replicaSet) {
+ for (const auto& [host, nodeState] : replicaSet) {
if (nodeState == NodeState::kStandalone) {
continue;
}
@@ -1690,15 +1489,11 @@ public:
auto bsonHosts = arrayBuilder.arr();
auto markIsMaster = [&](auto host, bool isMaster) {
- refresher.receivedIsMaster(
- host,
- -1,
- BSON("setName" << kSetName << "ismaster" << isMaster << "secondary" << !isMaster
- << "hosts"
- << bsonHosts
- << "ok"
- << true));
-
+ refresher.receivedIsMaster(host,
+ -1,
+ BSON("setName" << kSetName << "ismaster" << isMaster
+ << "secondary" << !isMaster << "hosts"
+ << bsonHosts << "ok" << true));
};
auto markFailed = [&](auto host) {
@@ -1769,13 +1564,16 @@ TEST_F(ChangeNotifierTest, NotifyNominal) {
// 'a' claims to be primary. Signal: Confirmed
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1783,13 +1581,16 @@ TEST_F(ChangeNotifierTest, NotifyNominal) {
// Getting another scan with the same details. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1812,13 +1613,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'a' claims to be primary. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1826,13 +1630,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'b' claims to be primary. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1840,13 +1647,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// All hosts tell us that they are not primary. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1854,13 +1664,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'a' claims to be primary again. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1883,13 +1696,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Update the set with a full scan showing no primary. Signal: PossibleSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, ++currentId);
@@ -1897,13 +1713,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as removed. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kStandalone,
+ HostAndPort("a"),
+ NodeState::kStandalone,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1911,16 +1730,20 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Discover 'd' as secondary. Signal: PossibleSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("d"), NodeState::kSecondary,
+ HostAndPort("d"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastPossibleSetId, ++currentId);
@@ -1928,16 +1751,20 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'b' as primary, no 'd'. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("d"), NodeState::kStandalone,
+ HostAndPort("d"),
+ NodeState::kStandalone,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1945,13 +1772,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as removed. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kStandalone,
+ HostAndPort("a"),
+ NodeState::kStandalone,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1959,13 +1789,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as secondary again. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
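The hunks above are formatting-only churn from a clang-format upgrade: operands of operator<< in the fabricated isMaster replies are now packed onto shared lines up to the column limit, and each {HostAndPort, NodeState} pair in the updateSet initializer lists gets one element per line. A minimal sketch of a reply in the new packing, assuming the usual builder macros from src/mongo/bson/bsonmisc.h:

    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobj.h"

    namespace mongo {
    // Post-reformat style: "key" << value pairs share lines, but a break is
    // still forced before each string-literal value.
    BSONObj makeIsMasterReply(bool primary, const BSONArray& hosts) {
        return BSON("setName"
                    << "name"
                    << "ismaster" << primary << "secondary" << !primary
                    << "hosts" << hosts << "ok" << true);
    }
    }  // namespace mongo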
diff --git a/src/mongo/client/sasl_client_authenticate.h b/src/mongo/client/sasl_client_authenticate.h
index 307e837c38b..4b342a41a99 100644
--- a/src/mongo/client/sasl_client_authenticate.h
+++ b/src/mongo/client/sasl_client_authenticate.h
@@ -83,4 +83,4 @@ extern Future<void> (*saslClientAuthenticate)(auth::RunCommandHook runCommand,
* into "*payload". In all other cases, returns
*/
Status saslExtractPayload(const BSONObj& cmdObj, std::string* payload, BSONType* type);
-}
+} // namespace mongo
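The only change in this header is the namespace-closing comment; newer clang-format appends these itself (the FixNamespaceComments option), so bare closers are rewritten wholesale. A self-contained illustration:

    namespace mongo {
    int answer() {
        return 42;
    }
    }  // namespace mongo   <- comment now enforced by the formatter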
diff --git a/src/mongo/client/sasl_client_authenticate_impl.cpp b/src/mongo/client/sasl_client_authenticate_impl.cpp
index b025000fb5b..4d19e5597d2 100644
--- a/src/mongo/client/sasl_client_authenticate_impl.cpp
+++ b/src/mongo/client/sasl_client_authenticate_impl.cpp
@@ -56,9 +56,9 @@
namespace mongo {
-using std::endl;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
+using std::endl;
namespace {
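Here the using-declarations are only re-sorted: SortUsingDeclarations orders them lexicographically, which places executor::RemoteCommandRequest ahead of std::endl. A trivial standalone example of the sorted form:

    #include <iostream>
    #include <string>

    // Lexicographic order, qualified name and all.
    using std::cout;
    using std::endl;
    using std::string;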
diff --git a/src/mongo/client/sasl_scram_client_conversation.cpp b/src/mongo/client/sasl_scram_client_conversation.cpp
index effd369a4ff..c0f38495822 100644
--- a/src/mongo/client/sasl_scram_client_conversation.cpp
+++ b/src/mongo/client/sasl_scram_client_conversation.cpp
@@ -43,8 +43,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
StatusWith<bool> SaslSCRAMClientConversation::step(StringData inputData, std::string* outputData) {
_step++;
@@ -58,8 +58,8 @@ StatusWith<bool> SaslSCRAMClientConversation::step(StringData inputData, std::st
return _thirdStep(inputData, outputData);
default:
return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- str::stream() << "Invalid SCRAM authentication step: "
- << _step);
+ str::stream()
+ << "Invalid SCRAM authentication step: " << _step);
}
}
@@ -126,8 +126,7 @@ StatusWith<bool> SaslSCRAMClientConversation::_secondStep(StringData inputData,
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for first SCRAM server message, got "
- << input.size()
- << " expected at least 3");
+ << input.size() << " expected at least 3");
}
if (!str::startsWith(input[0], "r=") || input[0].size() < 3) {
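Most error-path hunks here and below are one transformation: str::stream() message fragments get packed instead of broken after every <<. A standalone stand-in for mongo's str::stream (the real one lives under src/mongo/util) showing the wrapped shape:

    #include <sstream>
    #include <string>

    namespace str {
    // Minimal stand-in so the example compiles on its own.
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& v) {
            _ss << v;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };
    }  // namespace str

    std::string scramStepError(int step) {
        // Break before the whole str::stream() expression, then pack the
        // operands up to the column limit.
        return str::stream()
            << "Invalid SCRAM authentication step: " << step;
    }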
diff --git a/src/mongo/client/sasl_sspi.cpp b/src/mongo/client/sasl_sspi.cpp
index b917b016844..89f798577a9 100644
--- a/src/mongo/client/sasl_sspi.cpp
+++ b/src/mongo/client/sasl_sspi.cpp
@@ -439,8 +439,7 @@ sasl_client_plug_t sspiClientPlugin[] = {
{sspiPluginName, /* mechanism name */
112, /* TODO: (taken from gssapi) best mech additional security layer strength factor */
SASL_SEC_NOPLAINTEXT /* eam: copied from gssapi */
- |
- SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH |
+ | SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH |
SASL_SEC_PASS_CREDENTIALS, /* security_flags */
SASL_FEAT_NEEDSERVERFQDN | SASL_FEAT_WANT_CLIENT_FIRST | SASL_FEAT_ALLOWS_PROXY,
nullptr, /* required prompt ids, nullptr = user/pass only */
@@ -482,8 +481,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslSspiClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
str::stream() << "could not add SASL Client SSPI plugin " << sspiPluginName
- << ": "
- << sasl_errstring(ret, nullptr, nullptr));
+ << ": " << sasl_errstring(ret, nullptr, nullptr));
}
return Status::OK();
@@ -496,8 +494,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslPlainClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Could not add SASL Client PLAIN plugin " << sspiPluginName
- << ": "
- << sasl_errstring(ret, nullptr, nullptr));
+ << ": " << sasl_errstring(ret, nullptr, nullptr));
}
return Status::OK();
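The sspiClientPlugin hunk moves the | from the end of one line to the head of the next: long bitmask expressions now break before the binary operator. Sketch with hypothetical stand-ins for the SASL_SEC_* constants:

    // Hypothetical flag values, for illustration only.
    enum : unsigned {
        kNoPlaintext = 1u << 0,
        kNoActive = 1u << 1,
        kNoAnonymous = 1u << 2,
        kMutualAuth = 1u << 3,
        kPassCredentials = 1u << 4,
    };

    // A trailing comment pins the first operand, as the /* eam */ comment
    // does above; the continuation then leads with the operator.
    constexpr unsigned kSecurityFlags = kNoPlaintext  // no plaintext auth
        | kNoActive | kNoAnonymous | kMutualAuth | kPassCredentials;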
diff --git a/src/mongo/crypto/aead_encryption.cpp b/src/mongo/crypto/aead_encryption.cpp
index 030758850c1..b5e0ae4ce1c 100644
--- a/src/mongo/crypto/aead_encryption.cpp
+++ b/src/mongo/crypto/aead_encryption.cpp
@@ -101,9 +101,7 @@ Status _aesEncrypt(const SymmetricKey& key,
if (len != aesCBCCipherOutputLength(inLen)) {
return {ErrorCodes::BadValue,
str::stream() << "Encrypt error, expected cipher text of length "
- << aesCBCCipherOutputLength(inLen)
- << " but found "
- << len};
+ << aesCBCCipherOutputLength(inLen) << " but found " << len};
}
return Status::OK();
@@ -117,12 +115,11 @@ Status _aesDecrypt(const SymmetricKey& key,
std::size_t outLen,
std::size_t* resultLen) try {
// Check the plaintext buffer can fit the product of decryption
- auto[lowerBound, upperBound] = aesCBCExpectedPlaintextLen(in.length());
+ auto [lowerBound, upperBound] = aesCBCExpectedPlaintextLen(in.length());
if (upperBound > outLen) {
return {ErrorCodes::BadValue,
str::stream() << "Cleartext buffer of size " << outLen
- << " too small for output which can be as large as "
- << upperBound
+ << " too small for output which can be as large as " << upperBound
<< "]"};
}
@@ -145,13 +142,8 @@ Status _aesDecrypt(const SymmetricKey& key,
if (*resultLen < lowerBound || *resultLen > upperBound) {
return {ErrorCodes::BadValue,
str::stream() << "Decrypt error, expected clear text length in interval"
- << "["
- << lowerBound
- << ","
- << upperBound
- << "]"
- << "but found "
- << *resultLen};
+ << "[" << lowerBound << "," << upperBound << "]"
+ << "but found " << *resultLen};
}
/* Check that padding was removed.
@@ -211,8 +203,7 @@ Status aeadEncrypt(const SymmetricKey& key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
// According to the rfc on AES encryption, the associatedDataLength is defined as the
@@ -292,8 +283,7 @@ Status aeadEncryptWithIV(ConstDataRange key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
const uint8_t* macKey = reinterpret_cast<const uint8_t*>(key.data());
@@ -357,8 +347,7 @@ Status aeadDecrypt(const SymmetricKey& key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
const uint8_t* macKey = key.getKey();
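Besides the message packing, this file picks up a C++17 structured-binding fix: the old formatter emitted auto[lowerBound, upperBound]; the new one spaces it as auto [lowerBound, upperBound] (and auto& [host, nodeState] in the scan test further up). Runnable:

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // Hypothetical bounds function standing in for aesCBCExpectedPlaintextLen.
    std::pair<std::size_t, std::size_t> expectedPlaintextLen(std::size_t in) {
        return {in - 16, in};
    }

    int main() {
        auto [lowerBound, upperBound] = expectedPlaintextLen(64);  // was auto[...]
        std::map<std::string, bool> replicaSet{{"a", true}, {"b", false}};
        for (const auto& [host, isPrimary] : replicaSet) {  // was auto & [ ... ]
            std::cout << host << (isPrimary ? ": primary" : ": secondary") << '\n';
        }
        std::cout << lowerBound << ' ' << upperBound << '\n';
    }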
diff --git a/src/mongo/crypto/hash_block.h b/src/mongo/crypto/hash_block.h
index ac5a3017b9c..4d408e9d5e7 100644
--- a/src/mongo/crypto/hash_block.h
+++ b/src/mongo/crypto/hash_block.h
@@ -67,9 +67,9 @@ public:
*/
static StatusWith<HashBlock> fromBuffer(const uint8_t* input, size_t inputLen) {
if (inputLen != kHashLength) {
- return {
- ErrorCodes::InvalidLength,
- str::stream() << "Unsupported " << Traits::name << " hash length: " << inputLen};
+ return {ErrorCodes::InvalidLength,
+ str::stream() << "Unsupported " << Traits::name
+ << " hash length: " << inputLen};
}
HashType newHash;
@@ -157,8 +157,8 @@ public:
if (binData.length != kHashLength) {
return {ErrorCodes::UnsupportedFormat,
- str::stream() << "Unsupported " << Traits::name << " hash length: "
- << binData.length};
+ str::stream() << "Unsupported " << Traits::name
+ << " hash length: " << binData.length};
}
HashType newHash;
diff --git a/src/mongo/crypto/mechanism_scram.h b/src/mongo/crypto/mechanism_scram.h
index ab3c39273fb..fcb16331830 100644
--- a/src/mongo/crypto/mechanism_scram.h
+++ b/src/mongo/crypto/mechanism_scram.h
@@ -291,11 +291,10 @@ public:
Presecrets<HashBlock>(password, salt, iterationCount));
const auto encodedSalt =
base64::encode(reinterpret_cast<const char*>(salt.data()), salt.size());
- return BSON(kIterationCountFieldName << iterationCount << kSaltFieldName << encodedSalt
- << kStoredKeyFieldName
- << secrets.storedKey().toString()
- << kServerKeyFieldName
- << secrets.serverKey().toString());
+ return BSON(kIterationCountFieldName
+ << iterationCount << kSaltFieldName << encodedSalt << kStoredKeyFieldName
+ << secrets.storedKey().toString() << kServerKeyFieldName
+ << secrets.serverKey().toString());
}
const HashBlock& clientKey() const {
diff --git a/src/mongo/crypto/symmetric_crypto_apple.cpp b/src/mongo/crypto/symmetric_crypto_apple.cpp
index 9ca5c9c0b1e..216e33b8fa8 100644
--- a/src/mongo/crypto/symmetric_crypto_apple.cpp
+++ b/src/mongo/crypto/symmetric_crypto_apple.cpp
@@ -66,9 +66,7 @@ public:
// Therefore we expect a 128 bit block length.
uassert(ErrorCodes::BadValue,
str::stream() << "Invalid ivlen for selected algorithm, expected "
- << kCCBlockSizeAES128
- << ", got "
- << ivLen,
+ << kCCBlockSizeAES128 << ", got " << ivLen,
ivLen == kCCBlockSizeAES128);
CCCryptorRef context = nullptr;
diff --git a/src/mongo/crypto/symmetric_crypto_openssl.cpp b/src/mongo/crypto/symmetric_crypto_openssl.cpp
index 6329331a511..4e661b98bbd 100644
--- a/src/mongo/crypto/symmetric_crypto_openssl.cpp
+++ b/src/mongo/crypto/symmetric_crypto_openssl.cpp
@@ -63,8 +63,8 @@ void initCipherContext(
}
}
uassert(ErrorCodes::BadValue,
- str::stream() << "Unrecognized AES key size/cipher mode. Size: " << keySize << " Mode: "
- << getStringFromCipherMode(mode),
+ str::stream() << "Unrecognized AES key size/cipher mode. Size: " << keySize
+ << " Mode: " << getStringFromCipherMode(mode),
cipher);
const bool initOk = (1 == init(ctx, cipher, nullptr, key.getKey(), iv));
@@ -188,8 +188,9 @@ public:
// validateEncryptionOption asserts that platforms without GCM will never start in GCM mode
if (_mode == aesMode::gcm) {
#ifdef EVP_CTRL_GCM_GET_TAG
- if (1 != EVP_CIPHER_CTX_ctrl(
- _ctx.get(), EVP_CTRL_GCM_SET_TAG, tagLen, const_cast<uint8_t*>(tag))) {
+ if (1 !=
+ EVP_CIPHER_CTX_ctrl(
+ _ctx.get(), EVP_CTRL_GCM_SET_TAG, tagLen, const_cast<uint8_t*>(tag))) {
return Status(ErrorCodes::UnknownError,
str::stream()
<< "Unable to set GCM tag: "
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 07996cd2f82..63e59eb4628 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -431,8 +431,7 @@ Status AuthorizationManagerImpl::_initializeUserFromPrivilegeDocument(User* user
return Status(ErrorCodes::BadValue,
str::stream() << "User name from privilege document \"" << userName
<< "\" doesn't match name of provided User \""
- << user->getName().getUser()
- << "\"");
+ << user->getName().getUser() << "\"");
}
user->setID(parser.extractUserIDFromUserDocument(privDoc));
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index ab3a650d6dd..46c6da88230 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -127,9 +127,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2read"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -142,9 +140,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2cluster"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "clusterAdmin"
<< "db"
@@ -249,19 +245,17 @@ public:
private:
Status _getUserDocument(OperationContext* opCtx, const UserName& userName, BSONObj* userDoc) {
- Status status = findOne(opCtx,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB()),
- userDoc);
+ Status status =
+ findOne(opCtx,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
status = Status(ErrorCodes::UserNotFound,
str::stream() << "Could not find user \"" << userName.getUser()
- << "\" for db \""
- << userName.getDB()
- << "\"");
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
@@ -297,9 +291,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
<< "myUser"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "myRole"
<< "db"
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index fd186c06d7d..c308e3f5304 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -493,8 +493,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToGrantPrivilege(const Privilege
ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::grantRole)) {
@@ -514,8 +513,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToRevokePrivilege(const Privileg
ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::revokeRole)) {
@@ -1001,9 +999,7 @@ bool AuthorizationSessionImpl::isImpersonating() const {
auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
OperationContext* const opCtx, const boost::optional<LogicalSessionId> cursorSessionId)
-> Status {
- auto nobodyIsLoggedIn = [authSession = this] {
- return !authSession->isAuthenticated();
- };
+ auto nobodyIsLoggedIn = [authSession = this] { return !authSession->isAuthenticated(); };
auto authHasImpersonatePrivilege = [authSession = this] {
return authSession->isAuthorizedForPrivilege(
@@ -1037,13 +1033,12 @@ auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
// Operation Context (which implies a background job
!authHasImpersonatePrivilege() // Or if the user has an impersonation privilege, in which
// case, the user gets to sidestep certain checks.
- ) {
+ ) {
return Status{ErrorCodes::Unauthorized,
- str::stream() << "Cursor session id ("
- << sessionIdToStringOrNone(cursorSessionId)
- << ") is not the same as the operation context's session id ("
- << sessionIdToStringOrNone(opCtx->getLogicalSessionId())
- << ")"};
+ str::stream()
+ << "Cursor session id (" << sessionIdToStringOrNone(cursorSessionId)
+ << ") is not the same as the operation context's session id ("
+ << sessionIdToStringOrNone(opCtx->getLogicalSessionId()) << ")"};
}
return Status::OK();
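Two changes meet in this hunk: the trivial lambda collapses to a single line (newer clang-format can enforce this via AllowShortLambdasOnASingleLine), and the session-id mismatch message gets the usual packing. A runnable sketch of the collapse, with a hypothetical Session type:

    #include <iostream>

    // Hypothetical stand-in for the authorization session.
    struct Session {
        bool authenticated = false;
        bool isAuthenticated() const {
            return authenticated;
        }
    };

    int main() {
        Session session;
        auto nobodyIsLoggedIn = [s = &session] { return !s->isAuthenticated(); };
        std::cout << std::boolalpha << nobodyIsLoggedIn() << '\n';  // prints true
    }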
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 09476aca817..a8b51dc3137 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -179,9 +179,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -207,9 +205,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "admin"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -253,9 +249,7 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -285,9 +279,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rw"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -302,9 +294,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradmin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdmin"
<< "db"
@@ -316,9 +306,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rwany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -334,9 +322,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradminany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdminAnyDatabase"
<< "db"
@@ -413,9 +399,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -445,9 +429,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -490,9 +472,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -523,9 +503,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -559,9 +537,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -569,8 +545,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("192.168.0.0/24"
<< "192.168.2.10")
- << "serverAddress"
- << BSON_ARRAY("192.168.0.2"))
+ << "serverAddress" << BSON_ARRAY("192.168.0.2"))
<< BSON("clientSource" << BSON_ARRAY("2001:DB8::1") << "serverAddress"
<< BSON_ARRAY("2001:DB8::2"))
<< BSON("clientSource" << BSON_ARRAY("127.0.0.1"
@@ -912,11 +887,9 @@ TEST_F(AuthorizationSessionTest, CanAggregateOutWithInsertAndRemoveOnTargetNames
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
- BSONObj cmdObjNoBypassDocumentValidation = BSON(
- "aggregate" << testFooNss.coll() << "pipeline" << pipeline << "bypassDocumentValidation"
- << false
- << "cursor"
- << BSONObj());
+ BSONObj cmdObjNoBypassDocumentValidation =
+ BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline
+ << "bypassDocumentValidation" << false << "cursor" << BSONObj());
privileges = uassertStatusOK(authzSession->getPrivilegesForAggregate(
testFooNss, cmdObjNoBypassDocumentValidation, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -929,10 +902,8 @@ TEST_F(AuthorizationSessionTest,
Privilege(testBarCollResource, {ActionType::insert, ActionType::remove})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -947,10 +918,8 @@ TEST_F(AuthorizationSessionTest,
{ActionType::insert, ActionType::remove, ActionType::bypassDocumentValidation})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, true));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -1145,9 +1114,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithEmptyUser
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1164,9 +1131,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1181,9 +1146,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1191,9 +1154,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1211,9 +1172,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1221,9 +1180,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1242,9 +1199,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1252,9 +1207,7 @@ TEST_F(AuthorizationSessionTest,
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 7fa1f2ff5f0..e87a9880e73 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -89,11 +89,8 @@ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationCo
str::stream()
<< "Could not determine schema version of authorization data. "
"Bad (non-numeric) type "
- << typeName(versionElement.type())
- << " ("
- << versionElement.type()
- << ") for "
- << AuthorizationManager::schemaVersionFieldName
+ << typeName(versionElement.type()) << " (" << versionElement.type()
+ << ") for " << AuthorizationManager::schemaVersionFieldName
<< " field in version document");
}
} else if (status == ErrorCodes::NoMatchingDocument) {
@@ -132,8 +129,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
@@ -179,11 +175,8 @@ Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* opCt
userRoles << BSON("role" << role.getRole() << "db" << role.getDB());
}
*result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << userRoles.arr());
+ << userName.getDB() << "credentials" << BSON("external" << true)
+ << "roles" << userRoles.arr());
}
BSONElement directRolesElement;
@@ -285,17 +278,14 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* opCtx,
Status status = findOne(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
- status =
- Status(ErrorCodes::UserNotFound,
- str::stream() << "Could not find user \"" << userName.getUser() << "\" for db \""
- << userName.getDB()
- << "\"");
+ status = Status(ErrorCodes::UserNotFound,
+ str::stream() << "Could not find user \"" << userName.getUser()
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 16cdd668604..19a7977e605 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -83,8 +83,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 98893954d4e..ec1a695ba53 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -127,12 +127,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()))
- << "showPrivileges"
- << true
- << "showCredentials"
- << true
- << "showAuthenticationRestrictions"
- << true);
+ << "showPrivileges" << true << "showCredentials" << true
+ << "showAuthenticationRestrictions" << true);
BSONObjBuilder builder;
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
opCtx, "admin", usersInfoCmd, &builder);
@@ -149,10 +145,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
if (foundUsers.size() > 1) {
return Status(ErrorCodes::UserDataInconsistent,
- str::stream() << "Found multiple users on the \"" << userName.getDB()
- << "\" database with name \""
- << userName.getUser()
- << "\"");
+ str::stream()
+ << "Found multiple users on the \"" << userName.getDB()
+ << "\" database with name \"" << userName.getUser() << "\"");
}
*result = foundUsers[0].Obj().getOwned();
return Status::OK();
@@ -162,10 +157,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
BSONArrayBuilder userRolesBuilder;
auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
for (const RoleName& role : sslPeerInfo.roles) {
- userRolesBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()));
+ userRolesBuilder.append(BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
}
BSONArray providedRoles = userRolesBuilder.arr();
@@ -194,16 +188,12 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
"Recieved malformed response to request for X509 roles from config server");
}
- *result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray(cmdResult["roles"].Obj())
- << "inheritedRoles"
- << BSONArray(cmdResult["inheritedRoles"].Obj())
- << "inheritedPrivileges"
- << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
+ *result =
+ BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
+ << userName.getDB() << "credentials" << BSON("external" << true) << "roles"
+ << BSONArray(cmdResult["roles"].Obj()) << "inheritedRoles"
+ << BSONArray(cmdResult["inheritedRoles"].Obj()) << "inheritedPrivileges"
+ << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
return Status::OK();
}
}
@@ -215,11 +205,11 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
AuthenticationRestrictionsFormat showRestrictions,
BSONObj* result) {
BSONObjBuilder rolesInfoCmd;
- rolesInfoCmd.append("rolesInfo",
- BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << roleName.getDB())));
+ rolesInfoCmd.append(
+ "rolesInfo",
+ BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB())));
addShowToBuilder(&rolesInfoCmd, showPrivileges, showRestrictions);
BSONObjBuilder builder;
@@ -238,9 +228,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
if (foundRoles.size() > 1) {
return Status(ErrorCodes::RoleDataInconsistent,
str::stream() << "Found multiple roles on the \"" << roleName.getDB()
- << "\" database with name \""
- << roleName.getRole()
- << "\"");
+ << "\" database with name \"" << roleName.getRole() << "\"");
}
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
@@ -255,8 +243,7 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(
for (const RoleName& roleName : roles) {
rolesInfoCmdArray << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB());
}
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 288760ffb0d..969360a6f51 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -56,24 +56,21 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have db without collection
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have collection without db
parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
@@ -82,8 +79,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -92,8 +88,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -116,8 +111,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -143,8 +137,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -171,8 +164,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -198,8 +190,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index 51f91a10ba8..164d09a2cb8 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -167,8 +167,8 @@ Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role)
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant roles to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant roles to built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -193,8 +193,8 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -207,8 +207,9 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
_roleToMembers[role].erase(itToRm);
} else {
return Status(ErrorCodes::RolesNotRelated,
- str::stream() << recipient.getFullName() << " is not a member"
- " of "
+ str::stream() << recipient.getFullName()
+ << " is not a member"
+ " of "
<< role.getFullName());
}
@@ -227,8 +228,8 @@ Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
}
if (isBuiltinRole(victim)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << victim.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << victim.getFullName());
}
RoleNameVector& subordinatesOfVictim = _roleToSubordinates[victim];
@@ -253,8 +254,8 @@ Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& priv
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
_addPrivilegeToRoleNoChecks(role, privilegeToAdd);
@@ -277,8 +278,8 @@ Status RoleGraph::addPrivilegesToRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
for (PrivilegeVector::const_iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
@@ -296,8 +297,8 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
@@ -325,8 +326,9 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
}
return Status(ErrorCodes::PrivilegeNotFound,
- str::stream() << "Role: " << role.getFullName() << " does not "
- "contain any privileges on "
+ str::stream() << "Role: " << role.getFullName()
+ << " does not "
+ "contain any privileges on "
<< privilegeToRemove.getResourcePattern().toString());
}
@@ -350,8 +352,8 @@ Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
_directPrivilegesForRole[role].clear();
return Status::OK();
@@ -434,8 +436,8 @@ Status RoleGraph::_recomputePrivilegeDataHelper(const RoleName& startingRole,
if (!roleExists(currentRole)) {
return Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role: " << currentRole.getFullName()
- << " does not exist");
+ str::stream()
+ << "Role: " << currentRole.getFullName() << " does not exist");
}
// Check for cycles
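One subtlety in the RoleGraph hunks: fragments like " is not a member" " of " are adjacent string literals, concatenated at translation time, so the re-indentation moves them freely without changing the runtime message. Self-contained check:

    #include <cassert>
    #include <string>

    int main() {
        const std::string rewrapped =
            "recipient"
            " is not a member"
            " of "
            "role";
        assert(rewrapped == "recipient is not a member of role");
    }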
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index a2ed3dece7f..765d3d3c61c 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -196,7 +196,7 @@ TEST(RoleGraphTest, AddRemoveRoles) {
* |
* v
* D
- */
+ */
it = graph.getDirectSubordinates(roleA); // should be roleB and roleC, order doesn't matter
@@ -825,26 +825,22 @@ TEST(RoleGraphTest, AddRoleFromDocument) {
<< "dbA"
<< "collection"
<< "collA")
- << "actions"
- << BSON_ARRAY("insert"))),
+ << "actions" << BSON_ARRAY("insert"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< "dbB"
<< "collection"
<< "collB")
- << "actions"
- << BSON_ARRAY("insert"))
+ << "actions" << BSON_ARRAY("insert"))
<< BSON("resource" << BSON("db"
<< "dbC"
<< "collection"
<< "collC")
- << "actions"
- << BSON_ARRAY("compact"))),
+ << "actions" << BSON_ARRAY("compact"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find"))),
+ << "actions" << BSON_ARRAY("find"))),
};
const BSONArray restrictions[] = {
@@ -922,33 +918,28 @@ TEST(RoleGraphTest, AddRoleFromDocumentWithRestricitonMerge) {
BSON_ARRAY(BSON("serverAddress" << BSON_ARRAY("127.0.0.1/8")));
RoleGraph graph;
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbA.roleA"
- << "role"
- << "roleA"
- << "db"
- << "dbA"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSONArray()
- << "authenticationRestrictions"
- << roleARestrictions)));
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbB.roleB"
- << "role"
- << "roleB"
- << "db"
- << "dbB"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSON_ARRAY(BSON("role"
- << "roleA"
- << "db"
- << "dbA"))
- << "authenticationRestrictions"
- << roleBRestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbA.roleA"
+ << "role"
+ << "roleA"
+ << "db"
+ << "dbA"
+ << "privileges" << BSONArray() << "roles" << BSONArray()
+ << "authenticationRestrictions" << roleARestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbB.roleB"
+ << "role"
+ << "roleB"
+ << "db"
+ << "dbB"
+ << "privileges" << BSONArray() << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))
+ << "authenticationRestrictions" << roleBRestrictions)));
ASSERT_OK(graph.recomputePrivilegeData());
const auto A = graph.getDirectAuthenticationRestrictions(RoleName("roleA", "dbA"));
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 02c89f36bd7..33ee260fa93 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -92,9 +92,7 @@ Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& role
return Status(ErrorCodes::FailedToParse,
str::stream() << "Role document _id fields must be encoded as the string "
"dbname.rolename. Found "
- << idField
- << " for "
- << roleName.getFullName());
+ << idField << " for " << roleName.getFullName());
}
return Status::OK();
}
@@ -312,16 +310,13 @@ Status handleOplogCommand(RoleGraph* roleGraph, const BSONObj& cmdObj) {
if (cmdName == "createIndexes" &&
cmdObj.firstElement().str() == rolesCollectionNamespace.coll()) {
UnorderedFieldsBSONObjComparator instance;
- if (instance.evaluate(cmdObj == (BSON("createIndexes"
- << "system.roles"
- << "v"
- << 2
- << "name"
- << "role_1_db_1"
- << "key"
- << BSON("role" << 1 << "db" << 1)
- << "unique"
- << true)))) {
+ if (instance.evaluate(
+ cmdObj ==
+ (BSON("createIndexes"
+ << "system.roles"
+ << "v" << 2 << "name"
+ << "role_1_db_1"
+ << "key" << BSON("role" << 1 << "db" << 1) << "unique" << true)))) {
return Status::OK();
}
}
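The createIndexes hunk reflows around UnorderedFieldsBSONObjComparator::evaluate, which compares BSON documents while ignoring field order. A minimal sketch of that comparison, assuming the comparator header under src/mongo/bson:

    #include "mongo/bson/unordered_fields_bsonobj_comparator.h"

    namespace mongo {
    bool sameIndexSpec(const BSONObj& a, const BSONObj& b) {
        UnorderedFieldsBSONObjComparator cmp;
        // evaluate() unpacks the deferred comparison built by operator==.
        return cmp.evaluate(a == b);
    }
    }  // namespace mongo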
diff --git a/src/mongo/db/auth/sasl_authentication_session_test.cpp b/src/mongo/db/auth/sasl_authentication_session_test.cpp
index 97750182061..e849832d6ff 100644
--- a/src/mongo/db/auth/sasl_authentication_session_test.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session_test.cpp
@@ -131,19 +131,17 @@ SaslConversation::SaslConversation(std::string mech)
<< scram::Secrets<SHA256Block>::generateCredentials(
"frim", saslGlobalParams.scramSHA256IterationCount.load()));
- ASSERT_OK(authManagerExternalState->insert(opCtx.get(),
- NamespaceString("admin.system.users"),
- BSON("_id"
- << "test.andy"
- << "user"
- << "andy"
- << "db"
- << "test"
- << "credentials"
- << creds
- << "roles"
- << BSONArray()),
- BSONObj()));
+ ASSERT_OK(
+ authManagerExternalState->insert(opCtx.get(),
+ NamespaceString("admin.system.users"),
+ BSON("_id"
+ << "test.andy"
+ << "user"
+ << "andy"
+ << "db"
+ << "test"
+ << "credentials" << creds << "roles" << BSONArray()),
+ BSONObj()));
}
void SaslConversation::assertConversationFailure() {
diff --git a/src/mongo/db/auth/sasl_mechanism_registry.cpp b/src/mongo/db/auth/sasl_mechanism_registry.cpp
index 2de9fb02fee..bfe479143d3 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry.cpp
@@ -79,8 +79,7 @@ StatusWith<std::unique_ptr<ServerMechanismBase>> SASLServerMechanismRegistry::ge
return Status(ErrorCodes::BadValue,
str::stream() << "Unsupported mechanism '" << mechanismName
- << "' on authentication database '"
- << authenticationDatabase
+ << "' on authentication database '" << authenticationDatabase
<< "'");
}
@@ -147,9 +146,7 @@ bool SASLServerMechanismRegistry::_mechanismSupportedByConfig(StringData mechNam
namespace {
ServiceContext::ConstructorActionRegisterer SASLServerMechanismRegistryInitializer{
- "CreateSASLServerMechanismRegistry",
- {"EndStartupOptionStorage"},
- [](ServiceContext* service) {
+ "CreateSASLServerMechanismRegistry", {"EndStartupOptionStorage"}, [](ServiceContext* service) {
SASLServerMechanismRegistry::set(service,
std::make_unique<SASLServerMechanismRegistry>(
saslGlobalParams.authenticationMechanisms));
diff --git a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
index 6ca988bc9ae..b16df4ec3f8 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
@@ -27,11 +27,11 @@
* it in the license file.
*/
-#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/crypto/mechanism_scram.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_impl.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/unittest/unittest.h"
@@ -201,8 +201,7 @@ public:
<< "credentials"
<< BSON("SCRAM-SHA-256"
<< scram::Secrets<SHA256Block>::generateCredentials("sajack‍", 15000))
- << "roles"
- << BSONArray()),
+ << "roles" << BSONArray()),
BSONObj()));
@@ -214,10 +213,8 @@ public:
<< "sajack"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray()),
+ << "credentials" << BSON("external" << true)
+ << "roles" << BSONArray()),
BSONObj()));
internalSecurity.user = std::make_shared<User>(UserName("__system", "local"));
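The include hunk at the top of this file is SortIncludes at work: the registry's own header no longer floats above the block but is folded into the single alphabetical run, giving:

    #include "mongo/crypto/mechanism_scram.h"
    #include "mongo/db/auth/authorization_manager.h"
    #include "mongo/db/auth/authorization_manager_impl.h"
    #include "mongo/db/auth/authz_manager_external_state_mock.h"
    #include "mongo/db/auth/sasl_mechanism_registry.h"
    #include "mongo/db/operation_context.h"
    #include "mongo/db/service_context_test_fixture.h"
    #include "mongo/unittest/unittest.h"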
diff --git a/src/mongo/db/auth/sasl_options_init.cpp b/src/mongo/db/auth/sasl_options_init.cpp
index b83a94fa1c0..51ba683342b 100644
--- a/src/mongo/db/auth/sasl_options_init.cpp
+++ b/src/mongo/db/auth/sasl_options_init.cpp
@@ -95,4 +95,4 @@ MONGO_INITIALIZER_GENERAL(StoreSASLOptions, ("CoreOptions_Store"), ("EndStartupO
(InitializerContext* const context) {
return storeSASLOptions(moe::startupOptionsParsed);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.cpp b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
index 5ef2cf6ac9e..0a88084dea3 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
@@ -60,8 +60,9 @@ StatusWith<bool> trySCRAM(const User::CredentialData& credentials, StringData pw
reinterpret_cast<const std::uint8_t*>(decodedSalt.c_str()) +
decodedSalt.size()),
scram.iterationCount));
- if (scram.storedKey != base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
- secrets.storedKey().size())) {
+ if (scram.storedKey !=
+ base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
+ secrets.storedKey().size())) {
return Status(ErrorCodes::AuthenticationFailed,
str::stream() << "Incorrect user name or password");
}
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.h b/src/mongo/db/auth/sasl_plain_server_conversation.h
index 26acd1e0aac..d3c6af215ce 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.h
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.h
@@ -49,8 +49,9 @@ public:
static constexpr bool isInternal = true;
bool canMakeMechanismForUser(const User* user) const final {
auto credentials = user->getCredentials();
- return !credentials.isExternal && (credentials.scram<SHA1Block>().isValid() ||
- credentials.scram<SHA256Block>().isValid());
+ return !credentials.isExternal &&
+ (credentials.scram<SHA1Block>().isValid() ||
+ credentials.scram<SHA256Block>().isValid());
}
};
diff --git a/src/mongo/db/auth/sasl_scram_server_conversation.cpp b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
index 04a8e53798a..fc223097b4f 100644
--- a/src/mongo/db/auth/sasl_scram_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
@@ -99,8 +99,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for first SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -168,8 +167,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
if (!authzId.empty() && ServerMechanismBase::_principalName != authzId) {
return Status(ErrorCodes::BadValue,
str::stream() << "SCRAM user name " << ServerMechanismBase::_principalName
- << " does not match authzid "
- << authzId);
+ << " does not match authzid " << authzId);
}
if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
@@ -267,7 +265,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
* e=message
*
* NOTE: we are ignoring the channel binding part of the message
-**/
+ **/
template <typename Policy>
StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_secondStep(
OperationContext* opCtx, StringData inputData) {
@@ -275,8 +273,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for second SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -322,9 +319,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Unmatched SCRAM nonce received from client in second step, expected "
- << _nonce
- << " but received "
- << nonce);
+ << _nonce << " but received " << nonce);
}
// Do server side computations, compare storedKeys and generate client-final-message
diff --git a/src/mongo/db/auth/sasl_scram_test.cpp b/src/mongo/db/auth/sasl_scram_test.cpp
index 6bfd66667eb..23c6c548f3c 100644
--- a/src/mongo/db/auth/sasl_scram_test.cpp
+++ b/src/mongo/db/auth/sasl_scram_test.cpp
@@ -63,16 +63,10 @@ BSONObj generateSCRAMUserDocument(StringData username, StringData password) {
const auto sha256Cred =
scram::Secrets<SHA256Block>::generateCredentials(password.toString(), 15000);
return BSON("_id" << (str::stream() << database << "." << username).operator StringData()
- << AuthorizationManager::USER_NAME_FIELD_NAME
- << username
- << AuthorizationManager::USER_DB_FIELD_NAME
- << database
- << "credentials"
- << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred)
- << "roles"
- << BSONArray()
- << "privileges"
- << BSONArray());
+ << AuthorizationManager::USER_NAME_FIELD_NAME << username
+ << AuthorizationManager::USER_DB_FIELD_NAME << database << "credentials"
+ << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred) << "roles"
+ << BSONArray() << "privileges" << BSONArray());
}
std::string corruptEncodedPayload(const std::string& message,
@@ -303,7 +297,6 @@ TEST_F(SCRAMFixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
std::string::iterator nonceBegin = serverMessage.begin() + serverMessage.find("r=");
std::string::iterator nonceEnd = std::find(nonceBegin, serverMessage.end(), ',');
serverMessage = serverMessage.replace(nonceBegin, nonceEnd, "r=");
-
});
ASSERT_EQ(
SCRAMStepsResult(SaslTestState(SaslTestState::kClient, 2),
@@ -349,7 +342,6 @@ TEST_F(SCRAMFixture, testClientStep2GivesBadProof) {
std::string::iterator proofEnd = std::find(proofBegin, clientMessage.end(), ',');
clientMessage = clientMessage.replace(
proofBegin, proofEnd, corruptEncodedPayload(clientMessage, proofBegin, proofEnd));
-
});
ASSERT_EQ(SCRAMStepsResult(SaslTestState(SaslTestState::kServer, 2),
@@ -379,7 +371,6 @@ TEST_F(SCRAMFixture, testServerStep2GivesBadVerifier) {
encodedVerifier = corruptEncodedPayload(serverMessage, verifierBegin, verifierEnd);
serverMessage = serverMessage.replace(verifierBegin, verifierEnd, encodedVerifier);
-
});
auto result = runSteps(mutator);
diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp
index 04efa479fbc..0dc4bfafe23 100644
--- a/src/mongo/db/auth/security_file.cpp
+++ b/src/mongo/db/auth/security_file.cpp
@@ -74,8 +74,8 @@ StatusWith<std::vector<std::string>> readSecurityFile(const std::string& filenam
// check obvious file errors
if (stat(filename.c_str(), &stats) == -1) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Error reading file " << filename << ": "
- << strerror(errno));
+ str::stream()
+ << "Error reading file " << filename << ": " << strerror(errno));
}
#if !defined(_WIN32)
diff --git a/src/mongo/db/auth/user.cpp b/src/mongo/db/auth/user.cpp
index ce869ea28f5..96d1251c316 100644
--- a/src/mongo/db/auth/user.cpp
+++ b/src/mongo/db/auth/user.cpp
@@ -160,7 +160,7 @@ void User::addPrivileges(const PrivilegeVector& privileges) {
}
}
-void User::setRestrictions(RestrictionDocuments restrictions)& {
+void User::setRestrictions(RestrictionDocuments restrictions) & {
_restrictions = std::move(restrictions);
}
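
The user.cpp change only inserts a space before the ref-qualifier: `setRestrictions(...) &` may be called on lvalue User objects but not on temporaries. A self-contained sketch of the same qualifier (class and members hypothetical):

    #include <utility>
    #include <vector>

    class Doc {
    public:
        // The trailing '&' restricts this setter to lvalue objects;
        // Doc{}.setTags(...) would fail to compile.
        void setTags(std::vector<int> tags) & {
            _tags = std::move(tags);
        }

    private:
        std::vector<int> _tags;
    };
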
diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp
index 1c5da7795be..8eb6dc7a94b 100644
--- a/src/mongo/db/auth/user_document_parser.cpp
+++ b/src/mongo/db/auth/user_document_parser.cpp
@@ -152,8 +152,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
StringData userDBStr = userDBElement.valueStringData();
if (!NamespaceString::validDBName(userDBStr, NamespaceString::DollarInDbNameBehavior::Allow) &&
userDBStr != "$external") {
- return _badValue(str::stream() << "'" << userDBStr
- << "' is not a valid value for the db field.");
+ return _badValue(str::stream()
+ << "'" << userDBStr << "' is not a valid value for the db field.");
}
// Validate the "credentials" element
@@ -184,8 +184,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
str::stream() << fieldName << " does not exist");
}
if (scramElement.type() != Object) {
- return _badValue(str::stream() << fieldName
- << " credential must be an object, if present");
+ return _badValue(str::stream()
+ << fieldName << " credential must be an object, if present");
}
return Status::OK();
};
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index af798f525a6..44721c6570d 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -83,23 +83,18 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "pwd"
<< "a"
- << "roles"
- << BSON_ARRAY("read"))));
+ << "roles" << BSON_ARRAY("read"))));
// Need name field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need source field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials field
@@ -107,16 +102,14 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "roles"
- << emptyArray)));
+ << "roles" << emptyArray)));
// Need roles field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials)));
+ << "credentials" << credentials)));
    // authenticationRestrictions must be an array if it exists
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
@@ -131,11 +124,8 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
- << emptyArray
- << "authenticationRestrictions"
+ << "credentials" << credentials << "roles"
+ << emptyArray << "authenticationRestrictions"
<< emptyArray)));
// Empty roles arrays are OK
@@ -143,9 +133,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials of {external: true} if user's db is $external
@@ -153,19 +141,15 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << emptyArray)));
+ << "credentials" << BSON("external" << true)
+ << "roles" << emptyArray)));
// Roles must be objects
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY("read"))));
// Role needs name
@@ -173,9 +157,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("db"
<< "dbA")))));
@@ -184,9 +166,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA")))));
@@ -196,9 +176,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -209,9 +187,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -227,9 +203,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "authenticationRestrictions"
+ << "credentials" << credentials << "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("127.0.0.1/8") << "serverAddress"
<< BSON_ARRAY("127.0.0.1/8")))
<< "roles"
@@ -243,9 +217,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "extraData"
+ << "credentials" << credentials << "extraData"
<< BSON("foo"
<< "bar")
<< "roles"
@@ -318,13 +290,13 @@ TEST_F(V2UserDocumentParsing, V2CredentialExtraction) {
ASSERT(!user->getCredentials().isExternal);
// Make sure extracting valid combined credentials works
- ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "db"
- << "test"
- << "credentials"
- << credentials)));
+ ASSERT_OK(
+ v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << credentials)));
ASSERT(user->getCredentials().scram_sha1.isValid());
ASSERT(user->getCredentials().scram_sha256.isValid());
ASSERT(!user->getCredentials().isExternal);
@@ -350,18 +322,18 @@ TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
user.get()));
// V1-style roles arrays no longer work
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY("read")),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY("read")),
+ user.get()));
// Roles must have "db" field
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY(BSONObj())),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSONObj())),
+ user.get()));
ASSERT_NOT_OK(
v2parser.initializeUserRolesFromUserDocument(BSON("user"
@@ -428,16 +400,14 @@ TEST_F(V2UserDocumentParsing, V2AuthenticationRestrictionsExtraction) {
ASSERT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << emptyArray),
+ << "authenticationRestrictions" << emptyArray),
user.get()));
    // authenticationRestrictions must have at least one of "clientSource"/"serverAddress" fields
ASSERT_NOT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << BSON_ARRAY(emptyObj)),
+ << "authenticationRestrictions" << BSON_ARRAY(emptyObj)),
user.get()));
// authenticationRestrictions must not have unexpected elements
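
All of the user-document edits above follow one rule: where the old style put each `<<` operand of a BSON() chain on its own line, the new style packs name/value pairs onto a line until the column limit intervenes, while string literals that open a value still start their own line. A minimal sketch of the packed style (include path assumed):

    #include "mongo/bson/bsonobjbuilder.h"  // assumed location of the BSON macro

    mongo::BSONObj makeUserDoc() {
        return BSON("user"
                    << "spencer"
                    << "db"
                    << "test"
                    << "credentials" << BSON("external" << true) << "roles"
                    << mongo::BSONArray());
    }
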
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 29f4bc53574..0d380888ac9 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -64,8 +64,9 @@ Status _checkNoExtraFields(const BSONObj& cmdObj,
StringData fieldName = (*iter).fieldNameStringData();
if (!isGenericArgument(fieldName) && !validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
- str::stream() << "\"" << fieldName << "\" is not "
- "a valid argument to "
+ str::stream() << "\"" << fieldName
+ << "\" is not "
+ "a valid argument to "
<< cmdName);
}
}
@@ -175,8 +176,9 @@ Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
if (!parsedRoleNames->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"roles\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"roles\" array");
}
return Status::OK();
}
@@ -634,8 +636,9 @@ Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
}
if (!parsedPrivileges->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"privileges\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"privileges\" array");
}
return Status::OK();
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index 29d973fe3e7..f648c3e13ed 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -80,7 +80,7 @@ public:
}
}
- _baton->schedule([ this, anchor = shared_from_this() ](Status status) {
+ _baton->schedule([this, anchor = shared_from_this()](Status status) {
_runJobs(stdx::unique_lock(_mutex), status);
});
}
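
The baton.cpp hunk shows the new lambda-capture spacing: `[ this, anchor = shared_from_this() ]` loses its inner padding. The init-capture itself is the interesting part, since it keeps the object alive until the scheduled callback runs; a generic sketch:

    #include <functional>
    #include <memory>

    struct Job : std::enable_shared_from_this<Job> {
        // Returns a task that extends the Job's lifetime until it runs.
        // (Only valid once the Job is already owned by a shared_ptr.)
        std::function<void()> makeTask() {
            return [this, self = shared_from_this()] {
                run();
            };
        }
        void run() {}
    };
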
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index c7ceead85a0..89d3719d095 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -64,8 +64,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while truncating collection: "
- << collectionName);
+ str::stream()
+ << "Not primary while truncating collection: " << collectionName);
}
Database* db = autoDb.getDb();
@@ -86,8 +86,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
repl::ReplicationCoordinator::modeNone) &&
collectionName.isOplog()) {
return Status(ErrorCodes::OplogOperationUnsupported,
- str::stream() << "Cannot truncate a live oplog while replicating: "
- << collectionName);
+ str::stream()
+ << "Cannot truncate a live oplog while replicating: " << collectionName);
}
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
@@ -134,8 +134,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassert(ErrorCodes::NamespaceExists,
str::stream() << "cloneCollectionAsCapped failed - destination collection " << toNss
- << " already exists. source collection: "
- << fromNss,
+ << " already exists. source collection: " << fromNss,
!db->getCollection(opCtx, toNss));
// create new collection
@@ -263,8 +262,7 @@ void convertToCapped(OperationContext* opCtx,
uassertStatusOKWithContext(tmpNameResult,
str::stream()
<< "Cannot generate temporary collection namespace to convert "
- << collectionName
- << " to a capped collection");
+ << collectionName << " to a capped collection");
const auto& longTmpName = tmpNameResult.getValue();
const auto shortTmpName = longTmpName.coll().toString();
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index ab925ab0a97..4bc4dde890d 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -125,8 +125,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
fassert(40689,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto indexesToRebuild = indexSpecs.getValue();
invariant(
@@ -171,8 +170,8 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
// Note that the collection name already includes the database component.
auto collection = db->getCollection(opCtx, collNss);
invariant(collection,
- str::stream() << "failed to get valid collection pointer for namespace "
- << collNss);
+ str::stream()
+ << "failed to get valid collection pointer for namespace " << collNss);
if (minVisibleTimestampMap.count(collection->uuid()) > 0) {
collection->setMinimumVisibleSnapshot(
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 45c57acc17a..c881c88df99 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -134,8 +134,8 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
cmr.idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!cmr.idx) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << indexName << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << indexName << " for ns " << nss);
}
} else {
std::vector<const IndexDescriptor*> indexes;
@@ -145,17 +145,14 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
str::stream() << "index keyPattern " << keyPattern << " matches "
- << indexes.size()
- << " indexes,"
+ << indexes.size() << " indexes,"
<< " must use index name. "
- << "Conflicting indexes:"
- << indexes[0]->infoObj()
- << ", "
- << indexes[1]->infoObj());
+ << "Conflicting indexes:" << indexes[0]->infoObj()
+ << ", " << indexes[1]->infoObj());
} else if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << keyPattern << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << keyPattern << " for ns " << nss);
}
cmr.idx = indexes[0];
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 6e508db8f5c..2021ba83cec 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -252,9 +252,8 @@ private:
mongo::stdx::unordered_map<CollectionUUID, NamespaceString, CollectionUUID::Hash>>
_shadowCatalog;
- using CollectionCatalogMap = mongo::stdx::unordered_map<CollectionUUID,
- std::unique_ptr<Collection>,
- CollectionUUID::Hash>;
+ using CollectionCatalogMap = mongo::stdx::
+ unordered_map<CollectionUUID, std::unique_ptr<Collection>, CollectionUUID::Hash>;
using OrderedCollectionMap = std::map<std::pair<std::string, CollectionUUID>, Collection*>;
using NamespaceCollectionMap = mongo::stdx::unordered_map<NamespaceString, Collection*>;
CollectionCatalogMap _catalog;
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index e9985074fd8..205a5647e75 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -121,7 +121,7 @@ public:
void checkCollections(std::string dbName) {
unsigned long counter = 0;
- for (auto[orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
+ for (auto [orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
catalogIt != catalog.end() && orderedIt != collsIteratorEnd(dbName);
++catalogIt, ++orderedIt) {
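
The only change in collection_catalog_test.cpp is the space in `auto [orderedIt, catalogIt]`: newer clang-format understands C++17 structured bindings and no longer glues the binding list to `auto`. The dual-iterator loop pattern it reformats, reduced to standard C++:

    #include <tuple>
    #include <vector>

    bool sameContents(const std::vector<int>& a, const std::vector<int>& b) {
        if (a.size() != b.size())
            return false;
        // Initialize two iterators at once via a structured binding.
        for (auto [itA, itB] = std::tuple{a.begin(), b.begin()}; itA != a.end();
             ++itA, ++itB) {
            if (*itA != *itB)
                return false;
        }
        return true;
    }
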
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index eae4770532d..93281c0e541 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -148,9 +148,7 @@ StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
- << ": "
- << keyStatus.reason()
- << " For more info see"
+ << ": " << keyStatus.reason() << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
diff --git a/src/mongo/db/catalog/collection_compact.h b/src/mongo/db/catalog/collection_compact.h
index a3aefc1833e..719ae06c47b 100644
--- a/src/mongo/db/catalog/collection_compact.h
+++ b/src/mongo/db/catalog/collection_compact.h
@@ -35,9 +35,9 @@
namespace mongo {
/**
- * Compacts collection.
- * See record_store.h for CompactStats and CompactOptions definitions.
- */
+ * Compacts collection.
+ * See record_store.h for CompactStats and CompactOptions definitions.
+ */
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
const NamespaceString& collectionNss,
const CompactOptions* options);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index de9a80f070d..6ebdaf1ec51 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -320,17 +320,13 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
if (ns().isSystem() && !ns().isDropPendingNamespace()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators not allowed on system collection " << ns()
- << " with UUID "
- << _uuid};
+ << " with UUID " << _uuid};
}
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collection " << ns().ns()
- << " with UUID "
- << _uuid
- << " in the "
- << ns().db()
+ << " with UUID " << _uuid << " in the " << ns().db()
<< " internal database"};
}
@@ -418,8 +414,9 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
const auto firstIdElem = data["first_id"];
// If the failpoint specifies no collection or matches the existing one, hang.
if ((!collElem || _ns.ns() == collElem.str()) &&
- (!firstIdElem || (begin != end && firstIdElem.type() == mongo::String &&
- begin->doc["_id"].str() == firstIdElem.str()))) {
+ (!firstIdElem ||
+ (begin != end && firstIdElem.type() == mongo::String &&
+ begin->doc["_id"].str() == firstIdElem.str()))) {
string whenFirst =
firstIdElem ? (string(" when first _id is ") + firstIdElem.str()) : "";
while (MONGO_FAIL_POINT(hangAfterCollectionInserts)) {
@@ -675,9 +672,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
uasserted(ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
- << oldSize
- << " != "
- << newDoc.objsize());
+ << oldSize << " != " << newDoc.objsize());
args->preImageDoc = oldDoc.value().getOwned();
@@ -850,11 +845,9 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
DurableCatalog::get(opCtx)->updateValidator(
opCtx, ns(), validatorDoc, getValidationLevel(), getValidationAction());
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc)
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc)]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
});
@@ -930,13 +923,11 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
StringData newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc),
- oldValidationLevel = _validationLevel,
- oldValidationAction = _validationAction
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc),
+ oldValidationLevel = _validationLevel,
+ oldValidationAction = _validationAction]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
this->_validationLevel = oldValidationLevel;
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index b9ca30d8bc6..75c5fb91d14 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -255,9 +255,9 @@ StatusWith<CollectionOptions> CollectionOptions::parse(const BSONObj& options, P
collectionOptions.idIndex = std::move(tempIdIndex);
} else if (!createdOn24OrEarlier && !mongo::isGenericArgument(fieldName)) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << "The field '" << fieldName
- << "' is not a valid collection option. Options: "
- << options);
+ str::stream()
+ << "The field '" << fieldName
+ << "' is not a valid collection option. Options: " << options);
}
}
@@ -413,4 +413,4 @@ bool CollectionOptions::matchesStorageOptions(const CollectionOptions& other,
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index ca55a98a5c8..f5f63b0f72f 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -148,4 +148,4 @@ struct CollectionOptions {
// The aggregation pipeline that defines this view.
BSONObj pipeline;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index 4eff8a7f9a4..9b6c87d55e0 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -300,10 +300,8 @@ void addErrorIfUnequal(T stored, T cached, StringData name, ValidateResults* res
if (stored != cached) {
results->valid = false;
results->errors.push_back(str::stream() << "stored value for " << name
- << " does not match cached value: "
- << stored
- << " != "
- << cached);
+ << " does not match cached value: " << stored
+ << " != " << cached);
}
}
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 5813440c265..d5fa352e829 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -237,11 +237,10 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
<< " - existing collection with conflicting UUID " << uuid
<< " is in a drop-pending state: " << *currentName;
return Result(Status(ErrorCodes::NamespaceExists,
- str::stream() << "existing collection "
- << currentName->toString()
- << " with conflicting UUID "
- << uuid.toString()
- << " is in a drop-pending state."));
+ str::stream()
+ << "existing collection " << currentName->toString()
+ << " with conflicting UUID " << uuid.toString()
+ << " is in a drop-pending state."));
}
// In the case of oplog replay, a future command may have created or renamed a
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index b3561aa2958..7ce5474e987 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -123,9 +123,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
auto duplicates = _getNamesWithConflictingCasing_inlock(dbname);
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "db already exists with different case already have: ["
- << *duplicates.cbegin()
- << "] trying to create ["
- << dbname.toString()
+ << *duplicates.cbegin() << "] trying to create [" << dbname.toString()
<< "]",
duplicates.empty());
@@ -241,8 +239,8 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
// It is the caller's responsibility to ensure that no index builds are active in the
// database.
invariant(!coll->getIndexCatalog()->haveAnyIndexesInProgress(),
- str::stream() << "An index is building on collection '" << coll->ns()
- << "'.");
+ str::stream()
+ << "An index is building on collection '" << coll->ns() << "'.");
}
dbs.insert(i->first);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index d5c57cc8db4..cc5c3b33792 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -354,8 +354,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(opCtx);
massert(ErrorCodes::BackgroundOperationInProgressForNamespace,
str::stream() << "cannot drop collection " << nss << " (" << uuid << ") when "
- << numIndexesInProgress
- << " index builds in progress.",
+ << numIndexesInProgress << " index builds in progress.",
numIndexesInProgress == 0);
audit::logDropCollection(&cc(), nss.toString());
@@ -605,8 +604,8 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
bool generatedUUID = false;
if (!optionsWithUUID.uuid) {
if (!canAcceptWrites) {
- std::string msg = str::stream() << "Attempted to create a new collection " << nss
- << " without a UUID";
+ std::string msg = str::stream()
+ << "Attempted to create a new collection " << nss << " without a UUID";
severe() << msg;
uasserted(ErrorCodes::InvalidOptions, msg);
} else {
@@ -700,10 +699,10 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
auto numPercentSign = std::count(collectionNameModel.begin(), collectionNameModel.end(), '%');
if (numPercentSign == 0) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Cannot generate collection name for temporary collection: "
- "model for collection name "
- << collectionNameModel
- << " must contain at least one percent sign.");
+ str::stream()
+ << "Cannot generate collection name for temporary collection: "
+ "model for collection name "
+ << collectionNameModel << " must contain at least one percent sign.");
}
if (!_uniqueCollectionNamespacePseudoRandom) {
@@ -742,9 +741,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
return Status(
ErrorCodes::NamespaceExists,
str::stream() << "Cannot generate collection name for temporary collection with model "
- << collectionNameModel
- << " after "
- << numGenerationAttempts
+ << collectionNameModel << " after " << numGenerationAttempts
<< " attempts due to namespace conflicts with existing collections.");
}
@@ -873,8 +870,7 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
} else {
invariant(createCollection(opCtx, nss, collectionOptions, createDefaultIndexes, idIndex),
str::stream() << "Collection creation failed after validating options: " << nss
- << ". Options: "
- << collectionOptions.toBSON());
+ << ". Options: " << collectionOptions.toBSON());
}
return Status::OK();
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 28173e8fbb0..542cae76e80 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -160,13 +160,13 @@ TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPending
// tests.
ON_BLOCK_EXIT([&wuow] { wuow.commit(); });
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss),
- AssertionException,
- ErrorCodes::DatabaseDropPending,
- (StringBuilder() << "Cannot create collection " << _nss
- << " - database is in the process of being dropped.")
- .stringData());
+ ASSERT_THROWS_CODE_AND_WHAT(db->createCollection(_opCtx.get(), _nss),
+ AssertionException,
+ ErrorCodes::DatabaseDropPending,
+ (StringBuilder()
+ << "Cannot create collection " << _nss
+ << " - database is in the process of being dropped.")
+ .stringData());
});
}
@@ -299,11 +299,10 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
auto indexCatalog = collection->getIndexCatalog();
ASSERT_EQUALS(indexCatalog->numIndexesInProgress(opCtx), 0);
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << "a_1"
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name"
+ << "a_1"
+ << "ns" << nss.ns());
auto indexBuildBlock = std::make_unique<IndexBuildBlock>(
indexCatalog, collection->ns(), indexInfoObj, IndexBuildMethod::kHybrid);
@@ -410,8 +409,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss1 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss1.ns())) {
FAIL((StringBuilder() << "First generated namespace \"" << nss1.ns()
-                                       << "\" does not match regular expression \""
-                                       << re.pattern()
+                                       << "\" does not match regular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -428,8 +426,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss2 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss2.ns())) {
FAIL((StringBuilder() << "Second generated namespace \"" << nss2.ns()
-                               << "\" does not match regular expression \""
-                               << re.pattern()
+                               << "\" does not match regular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -522,28 +519,28 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineMin) {
}
TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex) {
- writeConflictRetry(
- _opCtx.get(),
- "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
- _nss.ns(),
- [this] {
- AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
- auto db = autoDb.getDb();
- ASSERT_TRUE(db);
-
- WriteUnitOfWork wuow(_opCtx.get());
-
- CollectionOptions options;
- options.setNoIdIndex();
-
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss, options),
- AssertionException,
- 50001,
- (StringBuilder() << "autoIndexId:false is not allowed for collection " << _nss
- << " because it can be replicated")
- .stringData());
- });
+ writeConflictRetry(_opCtx.get(),
+ "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
+ _nss.ns(),
+ [this] {
+ AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ ASSERT_TRUE(db);
+
+ WriteUnitOfWork wuow(_opCtx.get());
+
+ CollectionOptions options;
+ options.setNoIdIndex();
+
+ ASSERT_THROWS_CODE_AND_WHAT(
+ db->createCollection(_opCtx.get(), _nss, options),
+ AssertionException,
+ 50001,
+ (StringBuilder()
+ << "autoIndexId:false is not allowed for collection " << _nss
+ << " because it can be replicated")
+ .stringData());
+ });
}
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index 27a7969c6d6..e27dfb11b66 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -84,4 +84,4 @@ public:
private:
boost::optional<DisableDocumentValidation> _documentValidationDisabler;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index fd471f8ad50..7cb0b952f4d 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -275,12 +275,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
}
if (!result.status.isOK()) {
- return result.status.withContext(
- str::stream() << "dropDatabase " << dbName << " failed waiting for "
- << numCollectionsToDrop
- << " collection drop(s) (most recent drop optime: "
- << awaitOpTime.toString()
- << ") to replicate.");
+ return result.status.withContext(str::stream()
+ << "dropDatabase " << dbName << " failed waiting for "
+ << numCollectionsToDrop
+ << " collection drop(s) (most recent drop optime: "
+ << awaitOpTime.toString() << ") to replicate.");
}
log() << "dropDatabase " << dbName << " - successfully dropped " << numCollectionsToDrop
@@ -301,8 +300,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Could not drop database " << dbName
<< " because it does not exist after dropping "
- << numCollectionsToDrop
- << " collection(s).");
+ << numCollectionsToDrop << " collection(s).");
}
bool userInitiatedWritesAndNotPrimary =
@@ -310,12 +308,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
- str::stream() << "Could not drop database " << dbName
- << " because we transitioned from PRIMARY to "
- << replCoord->getMemberState().toString()
- << " while waiting for "
- << numCollectionsToDrop
- << " pending collection drop(s).");
+ str::stream()
+ << "Could not drop database " << dbName
+ << " because we transitioned from PRIMARY to "
+ << replCoord->getMemberState().toString() << " while waiting for "
+ << numCollectionsToDrop << " pending collection drop(s).");
}
// _finishDropDatabase creates its own scope guard to ensure drop-pending is unset.
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index 2eb37e80a36..af36872c701 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -432,10 +432,10 @@ TEST_F(DropDatabaseTest,
auto status = dropDatabase(_opCtx.get(), _nss.db().toString());
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
- ASSERT_EQUALS(
- status.reason(),
- std::string(str::stream() << "Could not drop database " << _nss.db()
- << " because it does not exist after dropping 1 collection(s)."));
+ ASSERT_EQUALS(status.reason(),
+ std::string(str::stream()
+ << "Could not drop database " << _nss.db()
+ << " because it does not exist after dropping 1 collection(s)."));
ASSERT_FALSE(AutoGetDb(_opCtx.get(), _nss.db(), MODE_X).getDb());
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index cd56e85e2b6..3e212385757 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -105,7 +105,6 @@ Status wrappedRun(OperationContext* opCtx,
collection->uuid(),
desc->indexName(),
desc->infoObj());
-
});
anObjBuilder->append("msg", "non-_id indexes dropped for collection");
@@ -121,16 +120,14 @@ Status wrappedRun(OperationContext* opCtx,
opCtx, indexElem.embeddedObject(), false, &indexes);
if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "can't find index with key: "
- << indexElem.embeddedObject());
+ str::stream()
+ << "can't find index with key: " << indexElem.embeddedObject());
} else if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " indexes found for key: "
- << indexElem.embeddedObject()
+ str::stream() << indexes.size()
+ << " indexes found for key: " << indexElem.embeddedObject()
<< ", identify by name instead."
- << " Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ << " Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj());
}
@@ -166,23 +163,19 @@ Status wrappedRun(OperationContext* opCtx,
for (auto indexNameElem : indexElem.Array()) {
if (indexNameElem.type() != String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "dropIndexes " << collection->ns() << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": index name must be a string");
+ str::stream()
+ << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": index name must be a string");
}
auto indexToDelete = indexNameElem.String();
auto status = dropIndexByName(opCtx, collection, indexCatalog, indexToDelete);
if (!status.isOK()) {
- return status.withContext(str::stream() << "dropIndexes " << collection->ns()
- << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": "
- << indexToDelete);
+ return status.withContext(
+ str::stream() << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": " << indexToDelete);
}
}
diff --git a/src/mongo/db/catalog/health_log.cpp b/src/mongo/db/catalog/health_log.cpp
index 0bd4171c262..2703dee4aa1 100644
--- a/src/mongo/db/catalog/health_log.cpp
+++ b/src/mongo/db/catalog/health_log.cpp
@@ -48,7 +48,7 @@ CollectionOptions getOptions(void) {
options.cappedSize = kDefaultHealthlogSize;
return options;
}
-}
+} // namespace
HealthLog::HealthLog() : _writer(nss, getOptions(), kMaxBufferSize) {}
@@ -78,4 +78,4 @@ bool HealthLog::log(const HealthLogEntry& entry) {
}
const NamespaceString HealthLog::nss("local", "system.healthlog");
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/health_log.h b/src/mongo/db/catalog/health_log.h
index 2b312f741fa..ba2bcbf440a 100644
--- a/src/mongo/db/catalog/health_log.h
+++ b/src/mongo/db/catalog/health_log.h
@@ -91,4 +91,4 @@ public:
private:
DeferredWriter _writer;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index 7666f5b0900..60eb8152cbf 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -119,14 +119,14 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection) {
}
if (isBackgroundIndex) {
- opCtx->recoveryUnit()->onCommit([ entry = _indexCatalogEntry, coll = collection ](
- boost::optional<Timestamp> commitTime) {
- // This will prevent the unfinished index from being visible on index iterators.
- if (commitTime) {
- entry->setMinimumVisibleSnapshot(commitTime.get());
- coll->setMinimumVisibleSnapshot(commitTime.get());
- }
- });
+ opCtx->recoveryUnit()->onCommit(
+ [entry = _indexCatalogEntry, coll = collection](boost::optional<Timestamp> commitTime) {
+ // This will prevent the unfinished index from being visible on index iterators.
+ if (commitTime) {
+ entry->setMinimumVisibleSnapshot(commitTime.get());
+ coll->setMinimumVisibleSnapshot(commitTime.get());
+ }
+ });
}
// Register this index with the CollectionInfoCache to regenerate the cache. This way, updates
@@ -177,8 +177,8 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
collection->indexBuildSuccess(opCtx, _indexCatalogEntry);
- opCtx->recoveryUnit()->onCommit([ opCtx, entry = _indexCatalogEntry, coll = collection ](
- boost::optional<Timestamp> commitTime) {
+ opCtx->recoveryUnit()->onCommit([opCtx, entry = _indexCatalogEntry, coll = collection](
+ boost::optional<Timestamp> commitTime) {
// Note: this runs after the WUOW commits but before we release our X lock on the
// collection. This means that any snapshot created after this must include the full
// index, and no one can try to read this index before we set the visibility.
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index fe816ce7dc4..2720ffd09c7 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -86,8 +86,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
const auto& nss = collection->ns();
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X),
str::stream() << "Unable to set up index build " << buildUUID << ": collection "
- << nss.ns()
- << " is not locked in exclusive mode.");
+ << nss.ns() << " is not locked in exclusive mode.");
auto builder = _getBuilder(buildUUID);
diff --git a/src/mongo/db/catalog/index_builds_manager_test.cpp b/src/mongo/db/catalog/index_builds_manager_test.cpp
index 3ecb5dca2a1..df5e50d244c 100644
--- a/src/mongo/db/catalog/index_builds_manager_test.cpp
+++ b/src/mongo/db/catalog/index_builds_manager_test.cpp
@@ -76,8 +76,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 2bcb83ce3ac..9a30dfc9687 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -317,8 +317,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
fassert(31164, status);
indexMetadataHasChanged = DurableCatalog::get(opCtx)->setIndexIsMultikey(
opCtx, _ns, _descriptor->indexName(), paths);
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
wuow.commit();
});
} else {
@@ -326,8 +328,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
opCtx, _ns, _descriptor->indexName(), paths);
}
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
// Within a multi-document transaction, reads should be able to see the effect of previous
// writes done within that transaction. If a previous write in a transaction has set the index
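
Both hunks here reshape the same construct: an onCommit() callback whose lambda parameter list was previously pushed onto the continuation line. The commit-hook mechanism works roughly as below (a generic sketch; `RecoveryUnitSketch` and the int timestamp are stand-ins for MongoDB's RecoveryUnit and Timestamp, not its real API):

    #include <functional>
    #include <optional>
    #include <utility>
    #include <vector>

    struct RecoveryUnitSketch {
        using Callback = std::function<void(std::optional<int> commitTime)>;

        // Registered callbacks run only if/when the unit of work commits.
        void onCommit(Callback cb) {
            _onCommit.push_back(std::move(cb));
        }

        void commit(std::optional<int> commitTime) {
            for (auto& cb : _onCommit)
                cb(commitTime);
            _onCommit.clear();
        }

    private:
        std::vector<Callback> _onCommit;
    };
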
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 0bddaac7c57..f060325141d 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -171,8 +171,7 @@ string IndexCatalogImpl::_getAccessMethodName(const BSONObj& keyPattern) const {
// supports an index plugin unsupported by this version.
uassert(17197,
str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index "
- << keyPattern,
+ << "in index " << keyPattern,
IndexNames::isKnownName(pluginName));
return pluginName;
@@ -392,17 +391,16 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
}
if (!initFromDisk) {
- opCtx->recoveryUnit()->onRollback(
- [ this, opCtx, isReadyIndex, descriptor = descriptorPtr ] {
- // Need to preserve indexName as descriptor no longer exists after remove().
- const std::string indexName = descriptor->indexName();
- if (isReadyIndex) {
- _readyIndexes.remove(descriptor);
- } else {
- _buildingIndexes.remove(descriptor);
- }
- _collection->infoCache()->droppedIndex(opCtx, indexName);
- });
+ opCtx->recoveryUnit()->onRollback([this, opCtx, isReadyIndex, descriptor = descriptorPtr] {
+ // Need to preserve indexName as descriptor no longer exists after remove().
+ const std::string indexName = descriptor->indexName();
+ if (isReadyIndex) {
+ _readyIndexes.remove(descriptor);
+ } else {
+ _buildingIndexes.remove(descriptor);
+ }
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
+ });
}
return save;
@@ -413,10 +411,8 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(_collection->numRecords(opCtx) == 0,
str::stream() << "Collection must be empty. Collection: " << _collection->ns()
- << " UUID: "
- << _collection->uuid()
- << " Count: "
- << _collection->numRecords(opCtx));
+ << " UUID: " << _collection->uuid()
+ << " Count: " << _collection->numRecords(opCtx));
_checkMagic();
@@ -523,8 +519,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (!IndexDescriptor::isIndexVersionSupported(indexVersion)) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "this version of mongod cannot build new indexes "
- << "of version number "
- << static_cast<int>(indexVersion));
+ << "of version number " << static_cast<int>(indexVersion));
}
if (nss.isOplog())
@@ -542,9 +537,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "the \"ns\" field of the index spec '"
<< specNamespace.valueStringData()
- << "' does not match the collection name '"
- << nss
- << "'");
+ << "' does not match the collection name '" << nss << "'");
}
// logical name of the index
@@ -563,8 +556,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
const Status keyStatus = index_key_validate::validateKeyPattern(key, indexVersion);
if (!keyStatus.isOK()) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "bad index key pattern " << key << ": "
- << keyStatus.reason());
+ str::stream()
+ << "bad index key pattern " << key << ": " << keyStatus.reason());
}
const string pluginName = IndexNames::findPluginName(key);
@@ -593,18 +586,16 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (static_cast<IndexVersion>(vElt.numberInt()) < IndexVersion::kV2) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Index version " << vElt.fieldNameStringData() << "="
- << vElt.numberInt()
- << " does not support the '"
- << collationElement.fieldNameStringData()
- << "' option"};
+ << vElt.numberInt() << " does not support the '"
+ << collationElement.fieldNameStringData() << "' option"};
}
if ((pluginName != IndexNames::BTREE) && (pluginName != IndexNames::GEO_2DSPHERE) &&
(pluginName != IndexNames::HASHED) && (pluginName != IndexNames::WILDCARD)) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' does not support collation: "
- << collator->getSpec().toBSON());
+ str::stream()
+ << "Index type '" << pluginName
+ << "' does not support collation: " << collator->getSpec().toBSON());
}
}
@@ -625,8 +616,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (spec.getField("expireAfterSeconds")) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' cannot be a TTL index");
+ str::stream()
+ << "Index type '" << pluginName << "' cannot be a TTL index");
}
}
@@ -736,21 +727,18 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
<< "An index with the same key pattern, but a different "
<< "collation already exists with the same name. Try again with "
<< "a unique name. "
- << "Existing index: "
- << desc->infoObj()
- << " Requested index: "
- << spec);
+ << "Existing index: " << desc->infoObj()
+ << " Requested index: " << spec);
}
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() != key) ||
SimpleBSONObjComparator::kInstance.evaluate(
desc->infoObj().getObjectField("collation") != collation)) {
return Status(ErrorCodes::IndexKeySpecsConflict,
- str::stream() << "Index must have unique name."
- << "The existing index: "
- << desc->infoObj()
- << " has the same name as the requested index: "
- << spec);
+ str::stream()
+ << "Index must have unique name."
+ << "The existing index: " << desc->infoObj()
+ << " has the same name as the requested index: " << spec);
}
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
@@ -776,9 +764,9 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index: " << spec
- << " already exists with different options: "
- << desc->infoObj());
+ str::stream()
+ << "Index: " << spec
+ << " already exists with different options: " << desc->infoObj());
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index with name: " << name
@@ -803,8 +791,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
<< "found existing text index \""
- << textIndexes[0]->indexName()
- << "\"");
+ << textIndexes[0]->indexName() << "\"");
}
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index b1d6f13390d..7491916b87a 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -303,8 +303,7 @@ BSONObj IndexConsistency::_generateInfo(const IndexInfo& indexInfo,
if (idKey) {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "idKey" << *idKey
- << "indexKey"
- << rehydratedKey);
+ << "indexKey" << rehydratedKey);
} else {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "indexKey"
<< rehydratedKey);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 191194f47f6..2bc450516fb 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -108,7 +108,7 @@ static const std::set<StringData> allowedIdIndexFieldNames = {
IndexDescriptor::kNamespaceFieldName,
// Index creation under legacy writeMode can result in an index spec with an _id field.
"_id"};
-}
+} // namespace
Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion indexVersion) {
const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
@@ -134,8 +134,7 @@ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion inde
if (keyElement.type() == BSONType::Object || keyElement.type() == BSONType::Array) {
return {code,
str::stream() << "Values in index key pattern cannot be of type "
- << typeName(keyElement.type())
- << " for index version v:"
+ << typeName(keyElement.type()) << " for index version v:"
<< static_cast<int>(indexVersion)};
}
@@ -276,9 +275,9 @@ StatusWith<BSONObj> validateIndexSpec(
if (IndexDescriptor::kKeyPatternFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kKeyPatternFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kKeyPatternFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
std::vector<StringData> keys;
@@ -321,18 +320,18 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kIndexNameFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexNameFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexNameFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
hasIndexNameField = true;
} else if (IndexDescriptor::kNamespaceFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kNamespaceFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kNamespaceFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
StringData ns = indexSpecElem.valueStringData();
@@ -344,22 +343,19 @@ StatusWith<BSONObj> validateIndexSpec(
if (ns != expectedNamespace.ns()) {
return {ErrorCodes::BadValue,
- str::stream() << "The value of the field '"
- << IndexDescriptor::kNamespaceFieldName
- << "' ("
- << ns
- << ") doesn't match the namespace '"
- << expectedNamespace
- << "'"};
+ str::stream()
+ << "The value of the field '" << IndexDescriptor::kNamespaceFieldName
+ << "' (" << ns << ") doesn't match the namespace '" << expectedNamespace
+ << "'"};
}
hasNamespaceField = true;
} else if (IndexDescriptor::kIndexVersionFieldName == indexSpecElemFieldName) {
if (!indexSpecElem.isNumber()) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexVersionFieldName
- << "' must be a number, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexVersionFieldName
+ << "' must be a number, but got " << typeName(indexSpecElem.type())};
}
auto requestedIndexVersionAsInt = representAs<int>(indexSpecElem.number());
@@ -383,9 +379,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kCollationFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kCollationFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kCollationFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
if (indexSpecElem.Obj().isEmpty()) {
@@ -398,10 +394,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kPartialFilterExprFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '"
- << IndexDescriptor::kPartialFilterExprFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPartialFilterExprFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
// Just use the simple collator, even though the index may have a separate collation
@@ -427,10 +422,9 @@ StatusWith<BSONObj> validateIndexSpec(
const auto key = indexSpec.getObjectField(IndexDescriptor::kKeyPatternFieldName);
if (IndexNames::findPluginName(key) != IndexNames::WILDCARD) {
return {ErrorCodes::BadValue,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed in an '"
- << IndexNames::WILDCARD
- << "' index"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed in an '" << IndexNames::WILDCARD << "' index"};
}
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
@@ -440,10 +434,10 @@ StatusWith<BSONObj> validateIndexSpec(
}
if (!key.hasField("$**")) {
return {ErrorCodes::FailedToParse,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed when '"
- << IndexDescriptor::kKeyPatternFieldName
- << "' is {\"$**\": ±1}"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed when '" << IndexDescriptor::kKeyPatternFieldName
+ << "' is {\"$**\": ±1}"};
}
if (indexSpecElem.embeddedObject().isEmpty()) {
@@ -486,10 +480,8 @@ StatusWith<BSONObj> validateIndexSpec(
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
<< "; cannot create an index with the '"
- << IndexDescriptor::kCollationFieldName
- << "' option and "
- << IndexDescriptor::kIndexVersionFieldName
- << "="
+ << IndexDescriptor::kCollationFieldName << "' option and "
+ << IndexDescriptor::kIndexVersionFieldName << "="
<< static_cast<int>(*resolvedIndexVersion)};
}
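
The hunks above illustrate the pattern this commit applies throughout: the str::stream() call moves onto its own line so that the chained << operands can pack several to a line rather than one per line. A minimal stand-alone sketch of the resulting shape (StreamSketch is a toy stand-in for mongo's builder, not the real implementation in mongo/util/str.h):

    #include <sstream>
    #include <string>

    // Toy stand-in for mongo::str::stream(): chained '<<' calls accumulate
    // into a buffer that converts implicitly to std::string.
    struct StreamSketch {
        std::ostringstream ss;
        template <typename T>
        StreamSketch& operator<<(const T& v) {
            ss << v;
            return *this;
        }
        operator std::string() const {
            return ss.str();
        }
    };

    int main() {
        int indexVersion = 2;
        // Post-reformat shape: the builder starts the line, and operands
        // pack onto as few lines as possible instead of one per line.
        std::string msg = StreamSketch()
            << "Values in index key pattern cannot be of type object"
            << " for index version v:" << indexVersion;
        return msg.empty() ? 1 : 0;
    }
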
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index bbb55b5281a..d61cbb8e0d7 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -127,8 +127,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueFailsForV2Indexes) {
ASSERT_EQ(ErrorCodes::CannotCreateIndex,
validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV2));
}
@@ -137,8 +136,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueSucceedsForV1Indexes) {
ASSERT_OK(validateKeyPattern(BSON("x" << false), IndexVersion::kV1));
ASSERT_OK(validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV1));
}
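
In the test hunks, the same reformat keeps each BSON key on the same line as its value ("b" << true) instead of breaking between them. A compilable toy illustrating the alternating key/value convention (ToyBuilder is an assumption for illustration; the real BSON() macro and BSONObjBuilder differ):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Toy alternating key/value builder: operands streamed in are treated
    // as key, value, key, value...
    class ToyBuilder {
    public:
        template <typename T>
        ToyBuilder& operator<<(const T& v) {
            std::ostringstream ss;
            ss << std::boolalpha << v;
            _tokens.push_back(ss.str());
            return *this;
        }
        std::string str() const {
            std::string out = "{";
            for (size_t i = 0; i + 1 < _tokens.size(); i += 2) {
                out += (i ? ", " : "") + _tokens[i] + ": " + _tokens[i + 1];
            }
            return out + "}";
        }

    private:
        std::vector<std::string> _tokens;
    };

    int main() {
        ToyBuilder b;
        // The reformat keeps each key adjacent to its value ("b" << true)
        // rather than forcing one operand per line.
        b << "a" << "2dsphere" << "b" << true;
        std::cout << b.str() << "\n";  // prints {a: 2dsphere, b: true}
        return 0;
    }
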
diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp
index 560f4820579..6b472d09073 100644
--- a/src/mongo/db/catalog/index_spec_validate_test.cpp
+++ b/src/mongo/db/catalog/index_spec_validate_test.cpp
@@ -50,8 +50,8 @@
namespace mongo {
namespace {
-using index_key_validate::validateIndexSpec;
using index_key_validate::validateIdIndexSpec;
+using index_key_validate::validateIndexSpec;
using index_key_validate::validateIndexSpecCollation;
using unittest::EnsureFCV;
@@ -140,16 +140,14 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsNotAString) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << 1),
+ << "ns" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << BSONObj()),
+ << "ns" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -181,8 +179,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceDoesNotMatch) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.coll()),
+ << "ns" << kTestNamespace.coll()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -191,8 +188,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -200,10 +196,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -215,10 +208,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1),
+ << "ns" << kTestNamespace.ns() << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -228,8 +218,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
<< "indexName"
<< "ns"
<< "test.index_spec_validate"
- << "v"
- << 1)),
+ << "v" << 1)),
sorted(result.getValue()));
}
@@ -246,8 +235,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotANumber) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << BSONObj()),
+ << "v" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -257,32 +245,28 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotRepresentableAsInt) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2.2),
+ << "v" << 2.2),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::nan("1")),
+ << "v" << std::nan("1")),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<double>::infinity()),
+ << "v" << std::numeric_limits<double>::infinity()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<long long>::max()),
+ << "v" << std::numeric_limits<long long>::max()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -292,8 +276,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsV0) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 0),
+ << "v" << 0),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -303,9 +286,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 3
- << "collation"
+ << "v" << 3 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -315,8 +296,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << -3LL),
+ << "v" << -3LL),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -325,8 +305,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -334,17 +313,13 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2LL),
+ << "v" << 2LL),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -352,10 +327,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2LL)),
+ << "ns" << kTestNamespace.ns() << "v" << 2LL)),
sorted(result.getValue()));
}
@@ -363,8 +335,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()),
+ << "ns" << kTestNamespace.ns()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -372,10 +343,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -387,8 +355,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -396,10 +363,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
}
@@ -408,8 +372,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << 1),
+ << "collation" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
@@ -424,8 +387,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONArray()),
+ << "collation" << BSONArray()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -435,8 +397,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsEmpty) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONObj()),
+ << "collation" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -449,8 +410,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsPresentAndVersionIsLessTh
<< "collation"
<< BSON("locale"
<< "simple")
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -459,9 +419,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "simple")),
kTestNamespace,
@@ -471,11 +429,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "simple"))),
sorted(result.getValue()));
@@ -483,9 +437,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
@@ -494,11 +446,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true))),
sorted(result.getValue()));
}
@@ -507,9 +455,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -519,11 +465,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "en"))),
sorted(result.getValue()));
@@ -533,10 +475,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "unknownField"
- << 1),
+ << "v" << 2 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -546,10 +485,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1
- << "unknownField"
- << 1),
+ << "v" << 1 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -559,95 +495,59 @@ TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfKeyPatternIsIncorrectForIdIndex) {
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("_id" << -1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("a" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfKeyPatternCorrectForIdIndex) {
ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "anyname"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfFieldNotAllowedForIdIndex) {
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "background"
- << false)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "background" << false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "unique"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "unique"
<< true)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "partialFilterExpression"
- << BSON("a" << 5))));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "partialFilterExpression" << BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "sparse"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "sparse"
<< false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "expireAfterSeconds"
- << 3600)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "expireAfterSeconds" << 3600)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "storageEngine"
- << BSONObj())));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "storageEngine" << BSONObj())));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfAllFieldsAllowedForIdIndex) {
- ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "simple"))));
+ ASSERT_OK(
+ validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "simple"))));
}
TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
@@ -659,10 +559,7 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "mock_reverse_string")),
@@ -670,34 +567,21 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
@@ -709,10 +593,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "simple")),
@@ -722,10 +603,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
}
@@ -738,50 +616,33 @@ TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultI
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2),
+ << "ns" << kTestNamespace.ns() << "v" << 2),
&defaultCollator);
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecPartialFilterTest, FailsIfPartialFilterIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << 1),
+ << "partialFilterExpression" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus(), ErrorCodes::TypeMismatch);
@@ -802,8 +663,7 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << BSON("a" << 1)),
+ << "partialFilterExpression" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -811,25 +671,25 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
TEST(IndexSpecWildcard, SucceedsWithInclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 1)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 1)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
TEST(IndexSpecWildcard, SucceedsWithExclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 0 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 0 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
@@ -895,13 +755,13 @@ TEST(IndexSpecWildcard, FailsWithImproperFeatureCompatabilityVersion) {
TEST(IndexSpecWildcard, FailsWithMixedProjection) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), 40178);
}
@@ -923,8 +783,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionPluginNotWildcard) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
@@ -935,8 +794,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << 4),
+ << "wildcardProjection" << 4),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::TypeMismatch);
@@ -947,8 +805,7 @@ TEST(IndexSpecWildcard, FailsWithEmptyProjection) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSONObj()),
+ << "wildcardProjection" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -959,8 +816,7 @@ TEST(IndexSpecWildcard, FailsWhenInclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -971,8 +827,7 @@ TEST(IndexSpecWildcard, FailsWhenExclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("b" << 0)),
+ << "wildcardProjection" << BSON("b" << 0)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
diff --git a/src/mongo/db/catalog/index_timestamp_helper.h b/src/mongo/db/catalog/index_timestamp_helper.h
index 581b1bd4740..9ae4457e409 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.h
+++ b/src/mongo/db/catalog/index_timestamp_helper.h
@@ -55,6 +55,6 @@ void setGhostCommitTimestampForWrite(OperationContext* opCtx, const NamespaceStr
* also throw WriteConflictException.
*/
bool setGhostCommitTimestampForCatalogWrite(OperationContext* opCtx, const NamespaceString& nss);
-};
+}; // namespace IndexTimestampHelper
-} // mongo
+} // namespace mongo
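
This header hunk only appends closing-brace comments; clang-format's FixNamespaceComments option rewrites a bare '}' or '};' that closes a namespace into the annotated form. A minimal sketch of the convention (the declaration is a placeholder):

    namespace mongo {
    namespace IndexTimestampHelper {

    bool isSketchOnly();  // placeholder declaration

    }  // namespace IndexTimestampHelper
    }  // namespace mongo
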
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index b3c0c7e00eb..05068e4a89f 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -135,8 +135,8 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx, Collection* col
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
opCtx->getServiceContext()->getOpObserver()->onOpMessage(
opCtx,
- BSON("msg" << std::string(str::stream() << "Failing index builds. Coll: "
- << nss)));
+ BSON("msg" << std::string(str::stream()
+ << "Failing index builds. Coll: " << nss)));
} else {
// Simply get a timestamp to write with here; we can't write to the oplog.
repl::UnreplicatedWritesBlock uwb(opCtx);
@@ -182,7 +182,7 @@ MultiIndexBlock::OnInitFn MultiIndexBlock::kNoopOnInitFn =
MultiIndexBlock::OnInitFn MultiIndexBlock::makeTimestampedIndexOnInitFn(OperationContext* opCtx,
const Collection* coll) {
- return [ opCtx, ns = coll->ns() ](std::vector<BSONObj> & specs)->Status {
+ return [opCtx, ns = coll->ns()](std::vector<BSONObj>& specs) -> Status {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (opCtx->recoveryUnit()->getCommitTimestamp().isNull() &&
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
@@ -213,12 +213,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
if (State::kAborted == _getState()) {
return {ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot initialize index builder: "
- << collection->ns()
- << " ("
- << collection->uuid()
- << "): "
- << indexSpecs.size()
+ << ". Cannot initialize index builder: " << collection->ns() << " ("
+ << collection->uuid() << "): " << indexSpecs.size()
<< " provided. First index spec: "
<< (indexSpecs.empty() ? BSONObj() : indexSpecs[0])};
}
@@ -725,8 +721,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
return {
ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot commit index builder: "
- << collection->ns()
+ << ". Cannot commit index builder: " << collection->ns()
<< (_collectionUUID ? (" (" + _collectionUUID->toString() + ")") : "")};
}
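
The lambda hunk above reflects clang-format 7's lambda style: no padding inside the capture list, the init-capture hugs its brackets, and the trailing return type is written '-> Status' with single spaces around the arrow. A self-contained sketch (this Status is a bare stand-in for mongo::Status so the snippet compiles alone):

    #include <string>
    #include <utility>
    #include <vector>

    // Bare stand-in for mongo::Status; only the formatting is the point.
    struct Status {
        bool ok;
    };

    int main() {
        std::string ns = "test.coll";
        // New output: [ns = std::move(ns)](std::vector<int>& specs) -> Status
        // instead of the old [ ns = std::move(ns) ](...)->Status.
        auto onInit = [ns = std::move(ns)](std::vector<int>& specs) -> Status {
            return {!specs.empty() && !ns.empty()};
        };
        std::vector<int> specs{1};
        return onInit(specs).ok ? 0 : 1;
    }
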
diff --git a/src/mongo/db/catalog/record_store_validate_adaptor.cpp b/src/mongo/db/catalog/record_store_validate_adaptor.cpp
index ec6d8bfb497..f86fa1e0bd5 100644
--- a/src/mongo/db/catalog/record_store_validate_adaptor.cpp
+++ b/src/mongo/db/catalog/record_store_validate_adaptor.cpp
@@ -57,7 +57,7 @@ KeyString::Builder makeWildCardMultikeyMetadataKeyString(const BSONObj& indexKey
multikeyMetadataOrd,
multikeyMetadataRecordId};
}
-}
+} // namespace
Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const RecordData& record,
@@ -107,9 +107,9 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
{documentKeySet.begin(), documentKeySet.end()},
{multikeyMetadataKeys.begin(), multikeyMetadataKeys.end()},
multikeyPaths)) {
- std::string msg = str::stream() << "Index " << descriptor->indexName()
- << " is not multi-key but has more than one"
- << " key in document " << recordId;
+ std::string msg = str::stream()
+ << "Index " << descriptor->indexName() << " is not multi-key but has more than one"
+ << " key in document " << recordId;
ValidateResults& curRecordResults = (*_indexNsResultsMap)[descriptor->indexName()];
curRecordResults.errors.push_back(msg);
curRecordResults.valid = false;
@@ -183,9 +183,9 @@ void RecordStoreValidateAdaptor::traverseIndex(const IndexAccessMethod* iam,
}
if (results && _indexConsistency->getMultikeyMetadataPathCount(indexInfo) > 0) {
- results->errors.push_back(
- str::stream() << "Index '" << descriptor->indexName()
- << "' has one or more missing multikey metadata index keys");
+ results->errors.push_back(str::stream()
+ << "Index '" << descriptor->indexName()
+ << "' has one or more missing multikey metadata index keys");
results->valid = false;
}
@@ -258,9 +258,9 @@ void RecordStoreValidateAdaptor::validateIndexKeyCount(const IndexDescriptor* id
if (idx->isIdIndex() && numTotalKeys != numRecs) {
hasTooFewKeys = numTotalKeys < numRecs ? true : hasTooFewKeys;
- std::string msg = str::stream() << "number of _id index entries (" << numTotalKeys
- << ") does not match the number of documents in the index ("
- << numRecs << ")";
+ std::string msg = str::stream()
+ << "number of _id index entries (" << numTotalKeys
+ << ") does not match the number of documents in the index (" << numRecs << ")";
if (noErrorOnTooFewKeys && (numTotalKeys < numRecs)) {
results.warnings.push_back(msg);
} else {
diff --git a/src/mongo/db/catalog/record_store_validate_adaptor.h b/src/mongo/db/catalog/record_store_validate_adaptor.h
index 57fd4852fa1..0e64283bbc0 100644
--- a/src/mongo/db/catalog/record_store_validate_adaptor.h
+++ b/src/mongo/db/catalog/record_store_validate_adaptor.h
@@ -103,4 +103,4 @@ private:
IndexCatalog* _indexCatalog;
ValidateResultsMap* _indexNsResultsMap;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 5b41b3e3539..352f2ca73b8 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -148,15 +148,8 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate a temporary collection name for the target "
- << targetNs
- << " ("
- << targetUUID
- << ") so that the source"
- << sourceNs
- << " ("
- << sourceUUID
- << ") could be renamed to "
- << targetNs);
+ << targetNs << " (" << targetUUID << ") so that the source" << sourceNs
+ << " (" << sourceUUID << ") could be renamed to " << targetNs);
}
const auto& tmpName = tmpNameResult.getValue();
const bool stayTemp = true;
@@ -290,9 +283,10 @@ Status renameCollectionWithinDB(OperationContext* opCtx,
boost::optional<Lock::CollectionLock> targetLock;
// To prevent deadlock, always lock system.views collection in the end because concurrent
// view-related operations always lock system.views in the end.
- if (!source.isSystemDotViews() && (target.isSystemDotViews() ||
- ResourceId(RESOURCE_COLLECTION, source.ns()) <
- ResourceId(RESOURCE_COLLECTION, target.ns()))) {
+ if (!source.isSystemDotViews() &&
+ (target.isSystemDotViews() ||
+ ResourceId(RESOURCE_COLLECTION, source.ns()) <
+ ResourceId(RESOURCE_COLLECTION, target.ns()))) {
// To prevent deadlock, always lock source and target in ascending resourceId order.
sourceLock.emplace(opCtx, source, MODE_X);
targetLock.emplace(opCtx, target, MODE_X);
@@ -498,8 +492,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate temporary collection name to rename " << source
- << " to "
- << target);
+ << " to " << target);
}
const auto& tmpName = tmpNameResult.getValue();
@@ -586,7 +579,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
tmpColl->uuid(),
indexToCopy,
false // fromMigrate
- );
+ );
auto indexResult =
tmpIndexCatalog->createIndexOnEmptyCollection(opCtx, indexToCopy);
if (!indexResult.isOK()) {
@@ -647,7 +640,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
cursor->save();
// When this exits via success or WCE, we need to restore the cursor.
- ON_BLOCK_EXIT([ opCtx, ns = tmpName.ns(), &cursor ]() {
+ ON_BLOCK_EXIT([opCtx, ns = tmpName.ns(), &cursor]() {
writeConflictRetry(
opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); });
});
@@ -861,9 +854,7 @@ Status renameCollectionForRollback(OperationContext* opCtx,
invariant(source->db() == target.db(),
str::stream() << "renameCollectionForRollback: source and target namespaces must "
"have the same database. source: "
- << *source
- << ". target: "
- << target);
+ << *source << ". target: " << target);
log() << "renameCollectionForRollback: rename " << *source << " (" << uuid << ") to " << target
<< ".";
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 903461b9133..11da9388c03 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -329,8 +329,8 @@ void _createCollection(OperationContext* opCtx,
<< " does not exist.";
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss, options)) << "Failed to create collection "
- << nss << " due to unknown error.";
+ ASSERT_TRUE(db->createCollection(opCtx, nss, options))
+ << "Failed to create collection " << nss << " due to unknown error.";
wuow.commit();
});
@@ -414,11 +414,8 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx,
ASSERT_TRUE(collection) << "Cannot create index on empty collection " << nss
<< " because collection " << nss << " does not exist.";
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << indexName
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name" << indexName << "ns" << nss.ns());
auto indexCatalog = collection->getIndexCatalog();
WriteUnitOfWork wuow(opCtx);
@@ -723,8 +720,8 @@ TEST_F(RenameCollectionTest, RenameCollectionMakesTargetCollectionDropPendingIfD
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT(_opObserver->onRenameCollectionDropTarget);
@@ -748,8 +745,8 @@ TEST_F(RenameCollectionTest,
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT_FALSE(_opObserver->onRenameCollectionDropTarget);
@@ -835,9 +832,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
_createCollectionWithUUID(_opCtx.get(), _targetNss);
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << UUID::gen());
- auto cmd =
- BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
ASSERT_OK(renameCollectionForApplyOps(
_opCtx.get(), missingSourceNss.db().toString(), uuidDoc["ui"], cmd, {}));
ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss));
@@ -875,9 +871,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << _createCollectionWithUUID(_opCtx.get(), dropPendingNss));
- auto cmd =
- BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
repl::UnreplicatedWritesBlock uwb(_opCtx.get());
repl::OpTime renameOpTime = {Timestamp(Seconds(200), 1U), 1LL};
@@ -920,8 +915,8 @@ void _testRenameCollectionStayTemp(OperationContext* opCtx,
RenameCollectionOptions options;
options.stayTemp = stayTemp;
ASSERT_OK(renameCollection(opCtx, sourceNss, targetNss, options));
- ASSERT_FALSE(_collectionExists(opCtx, sourceNss)) << "source collection " << sourceNss
- << " still exists after successful rename";
+ ASSERT_FALSE(_collectionExists(opCtx, sourceNss))
+ << "source collection " << sourceNss << " still exists after successful rename";
if (!isSourceCollectionTemporary) {
ASSERT_FALSE(_isTempCollection(opCtx, targetNss))
@@ -1008,8 +1003,8 @@ void _testRenameCollectionAcrossDatabaseOplogEntries(
_insertDocument(opCtx, sourceNss, BSON("_id" << 0));
oplogEntries->clear();
if (forApplyOps) {
- auto cmd = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "dropTarget" << true);
+ auto cmd = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "dropTarget" << true);
ASSERT_OK(renameCollectionForApplyOps(opCtx, sourceNss.db().toString(), {}, cmd, {}));
} else {
RenameCollectionOptions options;
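
These test hunks move the whole streamed assertion message onto the line after the assertion call, rather than splitting the message mid-stream. A toy sketch of the shape (Reporter and assertTrue are hypothetical stand-ins for the unittest macros):

    #include <iostream>
    #include <string>

    // Toy assertion helper: streaming into the returned object appends to
    // the failure report when the condition was false.
    struct Reporter {
        bool failed;
        template <typename T>
        Reporter& operator<<(const T& v) {
            if (failed)
                std::cerr << v;
            return *this;
        }
    };

    Reporter assertTrue(bool cond) {
        return Reporter{!cond};
    }

    int main() {
        std::string target = "test.t";
        // New break placement: the entire message follows on the next line,
        // instead of splitting after the first '<<' operand.
        assertTrue(false)
            << "target collection " << target << " missing after successful rename\n";
        return 0;
    }
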
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index cf3dd0f3625..c449932f653 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -237,7 +237,7 @@ public:
KeyPartitioner()(partitioned_detail::getKey(value), nPartitions);
this->_partitionedContainer->_partitions[partitionId].insert(std::move(value));
}
- void insert(value_type)&& = delete;
+ void insert(value_type) && = delete;
/**
* Erases one entry from the partitioned structure, returns the number of entries removed.
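
For reference, the partitioned.h hunk only changes spacing around the ref-qualifier; the declaration itself deletes insert() on temporaries. A toy illustration (Guard is hypothetical; the real Partitioned guard differs):

    // Overloads on ref-qualifiers: lvalue callers get the '&' overload,
    // temporaries hit the deleted '&&' overload and fail to compile.
    struct Guard {
        void insert(int v) & {
            value = v;
        }
        void insert(int) && = delete;
        int value = 0;
    };

    int main() {
        Guard g;
        g.insert(42);
        // Guard{}.insert(1);  // would not compile: deleted '&&' overload
        return g.value == 42 ? 0 : 1;
    }
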
diff --git a/src/mongo/db/catalog/util/partitioned_test.cpp b/src/mongo/db/catalog/util/partitioned_test.cpp
index 06de76bfc26..1cd235c95d6 100644
--- a/src/mongo/db/catalog/util/partitioned_test.cpp
+++ b/src/mongo/db/catalog/util/partitioned_test.cpp
@@ -237,7 +237,6 @@ TEST(PartitionedConcurrency, ShouldProtectConcurrentAccesses) {
AtomicWord<unsigned> ready{0};
for (size_t threadId = 1; threadId <= numThreads; ++threadId) {
auto workerThreadBody = [&, threadId, opsPerThread]() {
-
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
while (ready.load() < numThreads) {
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 3fe1fee4bc2..e00052a7ddc 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -120,8 +120,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
}
@@ -155,8 +154,7 @@ NamespaceString AutoGetCollection::resolveNamespaceStringOrUUID(OperationContext
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "UUID " << nsOrUUID.toString() << " specified in " << nsOrUUID.dbname()
- << " resolved to a collection in a different database: "
- << *resolvedNss,
+ << " resolved to a collection in a different database: " << *resolvedNss,
resolvedNss->db() == nsOrUUID.dbname());
return *resolvedNss;
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 8c1eb3c3bfb..456e0b0c98d 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -55,9 +55,7 @@ thread_local ServiceContext::UniqueClient currentClient;
void invariantNoCurrentClient() {
invariant(!haveClient(),
str::stream() << "Already have client on this thread: " //
- << '"'
- << Client::getCurrent()->desc()
- << '"');
+ << '"' << Client::getCurrent()->desc() << '"');
}
} // namespace
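
Note the lone // after the first string literal in the hunk above: it is a deliberate pin, since clang-format never joins a line that ends in a comment, so the first fragment keeps its own line. A minimal sketch of the trick (string content illustrative):

    #include <string>

    std::string describe(const std::string& desc) {
        // The empty trailing comment forces the break to stay here.
        return std::string("Already have client on this thread: ")  //
            + '"' + desc + '"';
    }

    int main() {
        return describe("conn1").empty() ? 1 : 0;
    }
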
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 0ebcff34f09..3e93171254b 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -299,7 +299,7 @@ void _appendCursorStats(BSONObjBuilder& b) {
b.appendNumber("totalNoTimeout", cursorStatsOpenNoTimeout.get());
b.appendNumber("timedOut", cursorStatsTimedOut.get());
}
-}
+} // namespace
void startClientCursorMonitor() {
clientCursorMonitor.go();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4656d7b379a..1d19746cd7f 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -132,8 +132,7 @@ struct Cloner::Fun {
uassert(
ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns(),
+ << " to " << to_collection.ns(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -162,13 +161,12 @@ struct Cloner::Fun {
db->userCreateNS(
opCtx, to_collection, collectionOptions, createDefaultIndexes, indexSpec),
str::stream() << "collection creation failed during clone ["
- << to_collection.ns()
- << "]");
+ << to_collection.ns() << "]");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
- str::stream() << "Missing collection during clone [" << to_collection.ns()
- << "]");
+ str::stream()
+ << "Missing collection during clone [" << to_collection.ns() << "]");
});
}
@@ -208,8 +206,8 @@ struct Cloner::Fun {
collection = db->getCollection(opCtx, to_collection);
uassert(28594,
- str::stream() << "Collection " << to_collection.ns()
- << " dropped while cloning",
+ str::stream()
+ << "Collection " << to_collection.ns() << " dropped while cloning",
collection != nullptr);
}
@@ -291,7 +289,7 @@ struct Cloner::Fun {
};
/* copy the specified collection
-*/
+ */
void Cloner::copy(OperationContext* opCtx,
const string& toDBName,
const NamespaceString& from_collection,
@@ -325,10 +323,7 @@ void Cloner::copy(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " with filter "
- << query.toString(),
+ << " to " << to_collection.ns() << " with filter " << query.toString(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
}
@@ -349,9 +344,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)",
+ << " to " << to_collection.ns() << " (Cloner)",
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -379,11 +372,9 @@ void Cloner::copyIndexes(OperationContext* opCtx,
createDefaultIndexes,
fixIndexSpec(to_collection.db().toString(),
getIdIndexSpec(from_indexes))),
- str::stream() << "Collection creation failed while copying indexes from "
- << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)");
+ str::stream()
+ << "Collection creation failed while copying indexes from "
+ << from_collection.ns() << " to " << to_collection.ns() << " (Cloner)");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
@@ -596,8 +587,7 @@ Status Cloner::createCollectionsForDb(
// we're trying to create already exists.
return Status(ErrorCodes::NamespaceExists,
str::stream() << "unsharded collection with same namespace "
- << nss.ns()
- << " already exists.");
+ << nss.ns() << " already exists.");
}
// If the collection is sharded and a collection with the same name already
@@ -612,13 +602,11 @@ Status Cloner::createCollectionsForDb(
if (clonedUUID == existingOpts.uuid)
return Status::OK();
- return Status(
- ErrorCodes::InvalidOptions,
- str::stream() << "sharded collection with same namespace " << nss.ns()
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream()
+ << "sharded collection with same namespace " << nss.ns()
<< " already exists, but UUIDs don't match. Existing UUID is "
- << existingOpts.uuid
- << " and new UUID is "
- << clonedUUID);
+ << existingOpts.uuid << " and new UUID is " << clonedUUID);
}
// If the collection does not already exist and is sharded, we create a new
diff --git a/src/mongo/db/collection_index_usage_tracker.cpp b/src/mongo/db/collection_index_usage_tracker.cpp
index 3f78b3c406a..0a694602c89 100644
--- a/src/mongo/db/collection_index_usage_tracker.cpp
+++ b/src/mongo/db/collection_index_usage_tracker.cpp
@@ -47,7 +47,7 @@ ServerStatusMetricField<Counter64> displayCollectionScans("queryExecutor.collect
&collectionScansCounter);
ServerStatusMetricField<Counter64> displayCollectionScansNonTailable(
"queryExecutor.collectionScans.nonTailable", &collectionScansNonTailableCounter);
-}
+} // namespace
CollectionIndexUsageTracker::CollectionIndexUsageTracker(ClockSource* clockSource)
: _clockSource(clockSource) {
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index e2fab366906..42b1ff38dfe 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -118,8 +118,7 @@ public:
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from << " to "
- << to
- << " (as capped)");
+ << to << " (as capped)");
}
Database* const db = autoDb.getDb();
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 8ab36f3ac03..64e59324a71 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -115,4 +115,4 @@ public:
}
};
static CompactCmd compactCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index cf470ebc6ae..04ca3a12f83 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -130,4 +130,4 @@ public:
return true;
}
} cmdConnectionStatus;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 545afc0c174..4afc346ce0c 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -51,9 +51,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Failpoint which causes to hang "count" cmd after acquiring the DB lock.
MONGO_FAIL_POINT_DEFINE(hangBeforeCollectionCount);
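
The using-declaration hunk above (and the matching one in index_spec_validate_test.cpp) is clang-format sorting using-declarations alphabetically, which is all it changes. A compilable sketch:

    #include <memory>
    #include <sstream>
    #include <string>

    // Sorted order: string < stringstream < unique_ptr.
    using std::string;
    using std::stringstream;
    using std::unique_ptr;

    int main() {
        unique_ptr<stringstream> ss(new stringstream);
        *ss << "ok";
        string s = ss->str();
        return s == "ok" ? 0 : 1;
    }
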
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 8ca80adf742..9a5817e9983 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -111,9 +111,9 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
if (kIndexesFieldName == cmdElemFieldName) {
if (cmdElem.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << kIndexesFieldName
- << "' must be an array, but got "
- << typeName(cmdElem.type())};
+ str::stream()
+ << "The field '" << kIndexesFieldName << "' must be an array, but got "
+ << typeName(cmdElem.type())};
}
for (auto&& indexesElem : cmdElem.Obj()) {
@@ -166,16 +166,15 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
continue;
} else {
return {ErrorCodes::BadValue,
- str::stream() << "Invalid field specified for " << kCommandName << " command: "
- << cmdElemFieldName};
+ str::stream() << "Invalid field specified for " << kCommandName
+ << " command: " << cmdElemFieldName};
}
}
if (!hasIndexesField) {
return {ErrorCodes::FailedToParse,
str::stream() << "The '" << kIndexesFieldName
- << "' field is a required argument of the "
- << kCommandName
+ << "' field is a required argument of the " << kCommandName
<< " command"};
}
@@ -205,15 +204,13 @@ Status validateTTLOptions(OperationContext* opCtx, const BSONObj& cmdObj) {
str::stream() << "TTL index '" << kExpireAfterSeconds
<< "' option must be numeric, but received a type of '"
<< typeName(expireAfterSecondsElt.type())
- << "'. Index spec: "
- << indexObj};
+ << "'. Index spec: " << indexObj};
}
if (expireAfterSecondsElt.safeNumberLong() < 0) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "TTL index '" << kExpireAfterSeconds
- << "' option cannot be less than 0. Index spec: "
- << indexObj};
+ << "' option cannot be less than 0. Index spec: " << indexObj};
}
const std::string tooLargeErr = str::stream()
@@ -296,8 +293,7 @@ void checkUniqueIndexConstraints(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -398,8 +394,7 @@ Collection* getOrCreateCollection(OperationContext* opCtx,
auto collection = db->createCollection(opCtx, ns, options);
invariant(collection,
str::stream() << "Failed to create collection " << ns.ns()
- << " during index creation: "
- << redact(cmdObj));
+ << " during index creation: " << redact(cmdObj));
wunit.commit();
return collection;
});
@@ -695,9 +690,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
auto abortIndexFuture = indexBuildsCoord->abortIndexBuildByBuildUUID(
buildUUID,
str::stream() << "Index build interrupted due to change in replication state: "
- << buildUUID
- << ": "
- << ex.toString());
+ << buildUUID << ": " << ex.toString());
log() << "Index build aborted due to NotMaster error: " << buildUUID << ": "
<< abortIndexFuture.getNoThrow(opCtx);
throw;
@@ -717,9 +710,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// All other errors should be forwarded to the caller with index build information included.
log() << "Index build failed: " << buildUUID << ": " << ex.toStatus();
ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns
- << " ( "
- << *collectionUUID
- << " )");
+ << " ( " << *collectionUUID << " )");
// Set last op on error to provide the client with a specific optime to read the state of
// the server when the createIndexes command failed.
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 484cac84eb9..7a3a5728bf7 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -339,7 +339,7 @@ private:
return false;
}
- auto[prev, next] = getPrevAndNextUUIDs(opCtx, collection);
+ auto [prev, next] = getPrevAndNextUUIDs(opCtx, collection);
// Find and report collection metadata.
auto indices = collectionIndexInfo(opCtx, collection);
@@ -546,4 +546,4 @@ public:
MONGO_REGISTER_TEST_COMMAND(DbCheckCmd);
} // namespace
-}
+} // namespace mongo
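
The dbcheck.cpp hunk updates structured-binding spacing: clang-format 7 emits 'auto [prev, next]' where older releases produced 'auto[prev, next]'; behavior is identical. A self-contained sketch (getPrevAndNext is a hypothetical stand-in):

    #include <utility>

    std::pair<int, int> getPrevAndNext() {
        return {1, 2};
    }

    int main() {
        // C++17 structured bindings, with the space clang-format 7 inserts.
        auto [prev, next] = getPrevAndNext();
        return prev + next == 3 ? 0 : 1;
    }
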
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 291c5c36112..a08a951f5a4 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -143,8 +143,8 @@ public:
repl::ReplicationCoordinator::modeNone) &&
(dbname == NamespaceString::kLocalDb)) {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "Cannot drop '" << dbname
- << "' database while replication is active");
+ str::stream()
+ << "Cannot drop '" << dbname << "' database while replication is active");
}
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 68f07b43fd0..71642bec879 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -108,7 +108,7 @@ namespace {
/**
* Sets the profiling level, logging/profiling threshold, and logging/profiling sample rate for the
* given database.
-*/
+ */
class CmdProfile : public ProfileCmdBase {
public:
CmdProfile() = default;
@@ -205,8 +205,7 @@ public:
uassert(50847,
str::stream() << "The element that calls binDataClean() must be type of "
"BinData, but type of "
- << typeName(stateElem.type())
- << " found.",
+ << typeName(stateElem.type()) << " found.",
(stateElem.type() == BSONType::BinData));
int len;
@@ -293,8 +292,7 @@ public:
uassert(50849,
str::stream() << "The element that calls binDataClean() must be type "
"of BinData, but type of "
- << owned["data"].type()
- << " found.",
+ << owned["data"].type() << " found.",
owned["data"].type() == BSONType::BinData);
exec->saveState();
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 5153192234d..72e4734d3e0 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -157,8 +157,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the global
@@ -173,8 +172,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the all_durable timestamp. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
@@ -334,8 +332,7 @@ private:
str::stream() << "Unable to read from a snapshot due to pending collection"
" catalog changes; please retry the operation. Snapshot"
" timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum timestamp is "
+ << mySnapshot->toString() << ". Collection minimum timestamp is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
} else {
diff --git a/src/mongo/db/commands/driverHelpers.cpp b/src/mongo/db/commands/driverHelpers.cpp
index 58f73648b4e..3a3ca1b8704 100644
--- a/src/mongo/db/commands/driverHelpers.cpp
+++ b/src/mongo/db/commands/driverHelpers.cpp
@@ -87,4 +87,4 @@ public:
return true;
}
} driverObjectIdTest;
-}
+} // namespace mongo
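
Several files in this patch gain only a comment on the closing brace of namespace mongo. That matches clang-format's FixNamespaceComments option (available since clang-format 5, if memory serves), which appends or repairs "// namespace <name>" trailers on namespaces longer than a few lines. The shape it enforces:

    namespace mongo {
    namespace {

    // ... file-local declarations ...

    }  // namespace
    }  // namespace mongo
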
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 25702078378..7a6b530af8f 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -243,4 +243,4 @@ public:
return true;
}
} cmdReIndex;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 5f6f034f4d2..277fb80b3c7 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -153,8 +153,7 @@ std::unique_ptr<CommandInvocation> CmdExplain::parse(OperationContext* opCtx,
if (auto innerDb = explainedObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbname
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbname);
}
auto explainedCommand = CommandHelpers::findCommand(explainedObj.firstElementFieldName());
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index 52ffb278a22..a50cc4ff06d 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -103,4 +103,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(FaultInjectCmd);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index cab903e15d3..ec1d65deb3b 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -215,12 +215,9 @@ void FeatureCompatibilityVersion::updateMinWireVersion() {
void FeatureCompatibilityVersion::_validateVersion(StringData version) {
uassert(40284,
str::stream() << "featureCompatibilityVersion must be '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "'. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << ".",
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "'. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink << ".",
version == FeatureCompatibilityVersionParser::kVersion42 ||
version == FeatureCompatibilityVersionParser::kVersion40);
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
index 6d68b8f417b..919a2aae34c 100644
--- a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
@@ -56,10 +56,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
return {ErrorCodes::TypeMismatch,
str::stream() << "Command argument must be of type "
"String, but was of type "
- << typeName(versionElem.type())
- << " in: "
- << cmdObj
- << ". See "
+ << typeName(versionElem.type()) << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
@@ -73,9 +70,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized field found " << cmdElem.fieldNameStringData()
- << " in "
- << cmdObj
- << ". See "
+ << " in " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -86,14 +81,9 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
version != FeatureCompatibilityVersionParser::kVersion40) {
return {ErrorCodes::BadValue,
str::stream() << "Invalid command argument. Expected '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "', found "
- << version
- << " in: "
- << cmdObj
- << ". See "
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "', found "
+ << version << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_documentation.h b/src/mongo/db/commands/feature_compatibility_version_documentation.h
index 7b51814b2ac..0be6c0b1f39 100644
--- a/src/mongo/db/commands/feature_compatibility_version_documentation.h
+++ b/src/mongo/db/commands/feature_compatibility_version_documentation.h
@@ -34,5 +34,5 @@ namespace feature_compatibility_version_documentation {
constexpr StringData kCompatibilityLink =
"http://dochub.mongodb.org/core/4.0-feature-compatibility"_sd;
constexpr StringData kUpgradeLink = "http://dochub.mongodb.org/core/4.0-upgrade-fcv"_sd;
-}
-}
+} // namespace feature_compatibility_version_documentation
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
index 4a86d174468..0aa872b9041 100644
--- a/src/mongo/db/commands/feature_compatibility_version_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
@@ -61,37 +61,26 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
continue;
} else if (fieldName == kVersionField || fieldName == kTargetVersionField) {
if (elem.type() != BSONType::String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be of type String, but was of type "
- << typeName(elem.type())
- << ". Contents of "
- << kParameterName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << fieldName << " must be of type String, but was of type "
+ << typeName(elem.type()) << ". Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
if (elem.String() != kVersion42 && elem.String() != kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid value for " << fieldName << ", found "
- << elem.String()
- << ", expected '"
- << kVersion42
- << "' or '"
- << kVersion40
- << "'. Contents of "
- << kParameterName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid value for " << fieldName << ", found "
+ << elem.String() << ", expected '" << kVersion42 << "' or '"
+ << kVersion40 << "'. Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -102,15 +91,12 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
targetVersionString = elem.String();
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Unrecognized field '" << fieldName << "'. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Unrecognized field '" << fieldName << "'. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -126,28 +112,23 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
}
} else if (versionString == kVersion42) {
if (targetVersionString == kVersion42 || targetVersionString == kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid state for " << kParameterName << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid state for " << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
} else {
version = ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42;
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Missing required field '" << kVersionField << "''. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Missing required field '" << kVersionField << "''. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 52250811dbd..88705768bcc 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -216,8 +216,8 @@ public:
} catch (DBException& error) {
if (error.code() == ErrorCodes::InvalidPipelineOperator) {
uasserted(ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Unsupported in view pipeline: "
- << error.what());
+ str::stream()
+ << "Unsupported in view pipeline: " << error.what());
}
throw;
}
@@ -329,8 +329,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime->toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the
@@ -346,8 +345,7 @@ public:
" than the all_durable timestamp. Requested"
" clusterTime: "
<< targetClusterTime->toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 7bb1aa48ae5..3f3f2cc9374 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -64,7 +64,7 @@ namespace {
// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex',
// lock time will be reported for a given user operation.
Lock::ResourceMutex commandMutex("fsyncCommandMutex");
-}
+} // namespace
/**
* Maintains a global read lock while mongod is fsyncLocked.
@@ -437,4 +437,4 @@ MONGO_INITIALIZER(fsyncLockedForWriting)(InitializerContext* context) {
setLockedForWritingImpl([]() { return fsyncCmd.fsyncLocked(); });
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/fsync_locked.h b/src/mongo/db/commands/fsync_locked.h
index b5d4ccffd09..89e8f8c55ba 100644
--- a/src/mongo/db/commands/fsync_locked.h
+++ b/src/mongo/db/commands/fsync_locked.h
@@ -33,14 +33,14 @@
namespace mongo {
/**
-* Returns true if mongod is currently fsyncLocked.
-*/
+ * Returns true if mongod is currently fsyncLocked.
+ */
bool lockedForWriting();
/**
-* Sets the implementation for lockedForWriting(). Should be done once during startup in a
-* MONGO_INITIALIZER.
-*/
+ * Sets the implementation for lockedForWriting(). Should be done once during startup in a
+ * MONGO_INITIALIZER.
+ */
void setLockedForWritingImpl(std::function<bool()> impl);
} // namespace mongo
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index 6106c465537..9c0a0ad661f 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -240,9 +240,7 @@ public:
if (val.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false)
- << " of type "
- << typeName(val.type()));
+ << val.toString(false) << " of type " << typeName(val.type()));
}
string p = val.String();
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 9920db02e75..6f91ec1fca7 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -86,17 +86,14 @@ void validateLSID(OperationContext* opCtx, const GetMoreRequest& request, Client
uassert(50737,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
+ << ", which was created in session " << *cursor->getSessionId()
<< ", without an lsid",
opCtx->getLogicalSessionId() || !cursor->getSessionId());
uassert(50738,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
- << ", in session "
- << *opCtx->getLogicalSessionId(),
+ << ", which was created in session " << *cursor->getSessionId()
+ << ", in session " << *opCtx->getLogicalSessionId(),
!opCtx->getLogicalSessionId() || !cursor->getSessionId() ||
(opCtx->getLogicalSessionId() == cursor->getSessionId()));
}
@@ -116,17 +113,14 @@ void validateTxnNumber(OperationContext* opCtx,
uassert(50740,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber",
opCtx->getTxnNumber() || !cursor->getTxnNumber());
uassert(50741,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber(),
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber(),
!opCtx->getTxnNumber() || !cursor->getTxnNumber() ||
(*opCtx->getTxnNumber() == *cursor->getTxnNumber()));
}
@@ -434,8 +428,8 @@ public:
// Ensure that the client still has the privileges to run the originating command.
if (!authzSession->isAuthorizedForPrivileges(cursorPin->getOriginatingPrivileges())) {
uasserted(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for getMore with cursor id "
- << _request.cursorid);
+ str::stream()
+ << "not authorized for getMore with cursor id " << _request.cursorid);
}
if (_request.nss != cursorPin->nss()) {
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 2d69dcb6e9f..04b3c7f87ed 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -101,4 +101,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdHashElt);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index b5c65b1e2be..48901a6ae58 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -110,8 +110,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
: BasicCommand(name), helpText(helpText) {}
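
The two-line swap above (and the identical ones in plan_cache_commands.cpp, dbdirectclient.cpp, and elsewhere below) looks like clang-format's SortUsingDeclarations at work: a contiguous run of using-declarations is ordered alphabetically, so unique_ptr now sorts before vector:

    #include <memory>
    #include <string>
    #include <vector>

    // A contiguous run of using-declarations is sorted alphabetically,
    // which is why unique_ptr now precedes vector in the hunk above.
    using std::string;
    using std::unique_ptr;
    using std::vector;
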
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 52f4c49f2f2..2c34bb715c7 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -188,4 +188,4 @@ public:
return true;
}
} cmdListDatabases;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index aa64d182e01..d17b924223f 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -127,8 +127,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << nss.ns());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << nss.ns());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp
index da2538006dd..cb820b02fe8 100644
--- a/src/mongo/db/commands/lock_info.cpp
+++ b/src/mongo/db/commands/lock_info.cpp
@@ -105,4 +105,4 @@ public:
return true;
}
} cmdLockInfo;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 7d0d52be379..0deaf4d262f 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -656,9 +656,7 @@ void State::appendResults(BSONObjBuilder& final) {
BSONObj idKey = BSON("_id" << 1);
if (!_db.runCommand("admin",
BSON("splitVector" << _config.outputOptions.finalNamespace.ns()
- << "keyPattern"
- << idKey
- << "maxChunkSizeBytes"
+ << "keyPattern" << idKey << "maxChunkSizeBytes"
<< _config.splitInfo),
res)) {
uasserted(15921, str::stream() << "splitVector failed: " << res);
@@ -746,8 +744,7 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
if (!_db.runCommand("admin",
BSON("renameCollection" << _config.tempNamespace.ns() << "to"
<< _config.outputOptions.finalNamespace.ns()
- << "stayTemp"
- << _config.shardedFirstPass),
+ << "stayTemp" << _config.shardedFirstPass),
info)) {
uasserted(10076, str::stream() << "rename failed: " << info);
}
@@ -831,9 +828,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
uassert(
ErrorCodes::PrimarySteppedDown,
str::stream() << "no longer primary while inserting mapReduce result into collection: "
- << nss
- << ": "
- << redact(o),
+ << nss << ": " << redact(o),
repl::ReplicationCoordinator::get(_opCtx)->canAcceptWritesFor(_opCtx, nss));
assertCollectionNotNull(nss, autoColl);
@@ -880,10 +875,8 @@ void State::_insertToInc(BSONObj& o) {
if (o.objsize() > BSONObjMaxUserSize) {
uasserted(ErrorCodes::BadValue,
str::stream() << "object to insert too large for incremental collection"
- << ". size in bytes: "
- << o.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << o.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
}
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
@@ -932,8 +925,9 @@ State::~State() {
_useIncremental ? _config.incLong : NamespaceString());
} catch (...) {
error() << "Unable to drop temporary collection created by mapReduce: "
- << _config.tempNamespace << ". This collection will be removed automatically "
- "the next time the server starts up. "
+ << _config.tempNamespace
+ << ". This collection will be removed automatically "
+ "the next time the server starts up. "
<< exceptionToStatus();
}
}
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index 5bc5d23a227..d5e32d1f9b5 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -159,7 +159,7 @@ private:
* result in "__returnValue"
* @param key OUT
* @param endSizeEstimate OUT
- */
+ */
void _reduce(const BSONList& values, BSONObj& key, int& endSizeEstimate);
JSFunction _func;
@@ -267,13 +267,13 @@ public:
void emit(const BSONObj& a);
/**
- * Checks the size of the transient in-memory results accumulated so far and potentially
- * runs reduce in order to compact them. If the data is still too large, it will be
- * spilled to the output collection.
- *
- * NOTE: Make sure that no DB locks are held, when calling this function, because it may
- * try to acquire write DB lock for the write to the output collection.
- */
+ * Checks the size of the transient in-memory results accumulated so far and potentially
+ * runs reduce in order to compact them. If the data is still too large, it will be
+ * spilled to the output collection.
+ *
+ * NOTE: Make sure that no DB locks are held, when calling this function, because it may
+ * try to acquire write DB lock for the write to the output collection.
+ */
void reduceAndSpillInMemoryStateIfNeeded();
/**
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index 90ebddcd6e6..4b5d2006054 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -156,5 +156,5 @@ bool mrSupportsWriteConcern(const BSONObj& cmd) {
return true;
}
}
-}
-}
+} // namespace mr
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 53735cdc5ea..53b06339448 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -74,11 +74,7 @@ void _compareOutputOptionField(const std::string& dbname,
if (actual == expected)
return;
FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName
- << ": Expected: "
- << expected
- << ". Actual: "
- << actual);
+ << fieldName << ": Expected: " << expected << ". Actual: " << actual);
}
/**
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index 09e7f52f688..4f2306ad1b2 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -306,8 +306,8 @@ public:
// Make sure we are allowed to change this parameter
if (!foundParameter->second->allowedToChangeAtRuntime()) {
- errmsg = str::stream() << "not allowed to change [" << parameterName
- << "] at runtime";
+ errmsg = str::stream()
+ << "not allowed to change [" << parameterName << "] at runtime";
return false;
}
@@ -365,9 +365,8 @@ public:
log() << "successfully set parameter " << parameterName << " to "
<< redact(parameter.toString(false))
- << (oldValue ? std::string(str::stream() << " (was "
- << redact(oldValue.toString(false))
- << ")")
+ << (oldValue ? std::string(str::stream()
+ << " (was " << redact(oldValue.toString(false)) << ")")
: "");
numSet++;
@@ -422,8 +421,8 @@ void LogComponentVerbosityServerParameter::append(OperationContext*,
Status LogComponentVerbosityServerParameter::set(const BSONElement& newValueElement) {
if (!newValueElement.isABSONObj()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "log component verbosity is not a BSON object: "
- << newValueElement);
+ str::stream()
+ << "log component verbosity is not a BSON object: " << newValueElement);
}
return setLogComponentVerbosity(newValueElement.Obj());
}
@@ -456,9 +455,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
if (str.size() > kMaxSize)
return {ErrorCodes::Overflow,
str::stream() << "Value for parameter automationServiceDescriptor"
- << " must be no more than "
- << kMaxSize
- << " bytes"};
+ << " must be no more than " << kMaxSize << " bytes"};
{
const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 4fc13e65c6a..6f821a7c50a 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -104,8 +104,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
PlanCacheCommand::PlanCacheCommand(const string& name,
const string& helpText,
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 0d35b3ada49..291bc64eb9b 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -341,12 +341,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON(
- "query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort() << "projection"
- << cqA->getQueryRequest().getProj());
- BSONObj shapeB = BSON(
- "query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort() << "projection"
- << cqB->getQueryRequest().getProj());
+ BSONObj shapeA =
+ BSON("query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort()
+ << "projection" << cqA->getQueryRequest().getProj());
+ BSONObj shapeB =
+ BSON("query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort()
+ << "projection" << cqB->getQueryRequest().getProj());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeA](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
@@ -413,14 +413,11 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
BSONObj shape = BSON("query" << cq->getQueryObj() << "sort" << cq->getQueryRequest().getSort()
- << "projection"
- << cq->getQueryRequest().getProj());
- BSONObj shapeWithCollation = BSON("query" << cqCollation->getQueryObj() << "sort"
- << cqCollation->getQueryRequest().getSort()
- << "projection"
- << cqCollation->getQueryRequest().getProj()
- << "collation"
- << cqCollation->getCollator()->getSpec().toBSON());
+ << "projection" << cq->getQueryRequest().getProj());
+ BSONObj shapeWithCollation = BSON(
+ "query" << cqCollation->getQueryObj() << "sort" << cqCollation->getQueryRequest().getSort()
+ << "projection" << cqCollation->getQueryRequest().getProj() << "collation"
+ << cqCollation->getCollator()->getSpec().toBSON());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shape](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 9618543a0a9..3fd5a94da1c 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -115,4 +115,4 @@ public:
return true;
}
} repairCursorCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 7293a0b01cb..3287294783c 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -299,8 +299,8 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
if (!resolvedView.isOK()) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
- << resolvedView.getStatus().toString()};
+ str::stream() << "Failed to resolve view '" << involvedNs.ns()
+ << "': " << resolvedView.getStatus().toString()};
}
resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp
index 7556eee5e01..c981a65fd4b 100644
--- a/src/mongo/db/commands/server_status_internal.cpp
+++ b/src/mongo/db/commands/server_status_internal.cpp
@@ -83,4 +83,4 @@ void MetricTree::appendTo(BSONObjBuilder& b) const {
bb.done();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_internal.h b/src/mongo/db/commands/server_status_internal.h
index cbd67fa0056..f9bde775db3 100644
--- a/src/mongo/db/commands/server_status_internal.h
+++ b/src/mongo/db/commands/server_status_internal.h
@@ -52,4 +52,4 @@ private:
std::map<std::string, MetricTree*> _subtrees;
std::map<std::string, ServerStatusMetric*> _metrics;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp
index 8e8b749b729..9cac3b3ecd4 100644
--- a/src/mongo/db/commands/server_status_metric.cpp
+++ b/src/mongo/db/commands/server_status_metric.cpp
@@ -49,4 +49,4 @@ string ServerStatusMetric::_parseLeafName(const string& name) {
return name.substr(idx + 1);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h
index 01c695ff046..f64327908e7 100644
--- a/src/mongo/db/commands/server_status_metric.h
+++ b/src/mongo/db/commands/server_status_metric.h
@@ -88,4 +88,4 @@ public:
private:
const T* _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp
index 67b263588e0..594422fb77e 100644
--- a/src/mongo/db/commands/sleep_command.cpp
+++ b/src/mongo/db/commands/sleep_command.cpp
@@ -174,4 +174,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(CmdSleep);
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 3485f623c7d..01b3d7b8c74 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -128,4 +128,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdSetCommittedSnapshot);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index f11cb7c1bd4..b4ca089ae3b 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -209,4 +209,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(EmptyCapped);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 645820be5ff..ae199964060 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -101,8 +101,7 @@ BSONArray roleSetToBSONArray(const stdx::unordered_set<RoleName>& roles) {
++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -113,8 +112,7 @@ BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -174,14 +172,14 @@ Status checkOkayToGrantRolesToRole(OperationContext* opCtx,
const RoleName& roleToAdd = *it;
if (roleToAdd == role) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant role " << role.getFullName()
- << " to itself.");
+ str::stream()
+ << "Cannot grant role " << role.getFullName() << " to itself.");
}
if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(
- ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB()
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream()
+ << "Roles on the \'" << role.getDB()
<< "\' database cannot be granted roles from other databases");
}
@@ -431,14 +429,13 @@ Status insertRoleDocument(OperationContext* opCtx, const BSONObj& roleObj) {
* Updates the given role object with the given update modifier.
*/
Status updateRoleDocument(OperationContext* opCtx, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(opCtx,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(
+ opCtx,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -516,13 +513,12 @@ Status updatePrivilegeDocument(OperationContext* opCtx,
Status updatePrivilegeDocument(OperationContext* opCtx,
const UserName& user,
const BSONObj& updateObj) {
- const auto status = updatePrivilegeDocument(opCtx,
- user,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << user.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << user.getDB()),
- updateObj);
+ const auto status = updatePrivilegeDocument(
+ opCtx,
+ user,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
+ updateObj);
return status;
}
@@ -621,8 +617,7 @@ StatusWith<AuthzLockGuard> requireWritableAuthSchema28SCRAM(OperationContext* op
str::stream()
<< "User and role management commands require auth data to have "
<< "at least schema version "
- << AuthorizationManager::schemaVersion28SCRAM
- << " but found "
+ << AuthorizationManager::schemaVersion28SCRAM << " but found "
<< foundSchemaVersion);
}
status = writeAuthSchemaVersionIfNeeded(opCtx, authzManager, foundSchemaVersion);
@@ -658,8 +653,7 @@ StatusWith<AuthzLockGuard> requireReadableAuthSchema26Upgrade(OperationContext*
return Status(ErrorCodes::AuthSchemaIncompatible,
str::stream() << "The usersInfo and rolesInfo commands require auth data to "
<< "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade
- << " but found "
+ << AuthorizationManager::schemaVersion26Upgrade << " but found "
<< foundSchemaVersion);
}
@@ -2022,9 +2016,9 @@ public:
&nMatched);
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
- .withContext(str::stream() << "Failed to remove role "
- << roleName.getFullName()
- << " from all users"));
+ .withContext(str::stream()
+ << "Failed to remove role " << roleName.getFullName()
+ << " from all users"));
}
// Remove this role from all other roles
@@ -2045,9 +2039,9 @@ public:
if (!status.isOK()) {
uassertStatusOK(
useDefaultCode(status, ErrorCodes::RoleModificationFailed)
- .withContext(
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users but failed to remove from all roles"));
+ .withContext(str::stream()
+ << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles"));
}
audit::logDropRole(Client::getCurrent(), roleName);
@@ -2139,13 +2133,12 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all users"));
+ << dbname << "\" db from all users"));
}
// Remove these roles from all other roles
- std::string sourceFieldName = str::stream() << "roles."
- << AuthorizationManager::ROLE_DB_FIELD_NAME;
+ std::string sourceFieldName = str::stream()
+ << "roles." << AuthorizationManager::ROLE_DB_FIELD_NAME;
status = updateAuthzDocuments(
opCtx,
AuthorizationManager::rolesCollectionNamespace,
@@ -2158,8 +2151,7 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::RoleModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all roles"));
+ << dbname << "\" db from all roles"));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2580,9 +2572,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
@@ -2653,9 +2643,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 7abc55ab60a..08e4e5345c1 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -58,8 +58,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to grant role: " << roles[i].getFullName());
}
}
@@ -83,8 +83,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to revoke role: " << roles[i].getFullName());
}
}
return Status::OK();
@@ -129,8 +129,8 @@ Status checkAuthForCreateUserCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: "
- << args.userName.getDB());
+ str::stream()
+ << "Not authorized to create users on db: " << args.userName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -231,8 +231,8 @@ Status checkAuthForCreateRoleCommand(Client* client,
if (!authzSession->isAuthorizedToCreateRole(args)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create roles on db: "
- << args.roleName.getDB());
+ str::stream()
+ << "Not authorized to create roles on db: " << args.roleName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -365,8 +365,8 @@ Status checkAuthForDropAllUsersFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop users from the " << dbname << " database");
}
return Status::OK();
}
@@ -415,8 +415,8 @@ Status checkAuthForUsersInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view users from the " << dbname << " database");
}
} else if (args.target == auth::UsersInfoArgs::Target::kGlobal) {
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
@@ -462,8 +462,8 @@ Status checkAuthForDropAllRolesFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop roles from the " << dbname << " database");
}
return Status::OK();
}
@@ -482,8 +482,8 @@ Status checkAuthForRolesInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view roles from the " << dbname << " database");
}
} else {
for (size_t i = 0; i < args.roleNames.size(); ++i) {
@@ -496,8 +496,7 @@ Status checkAuthForRolesInfoCommand(Client* client,
ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB()
- << " database");
+ << args.roleNames[i].getDB() << " database");
}
}
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index a6505b464d5..f0eafc0d873 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -212,4 +212,4 @@ public:
}
} validateCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index a98505ded6d..7fa3bb3c82b 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -111,7 +111,7 @@ void serializeReply(OperationContext* opCtx,
BSONSizeTracker upsertInfoSizeTracker;
BSONSizeTracker errorsSizeTracker;
- auto errorMessage = [&, errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [&, errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
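
The one-character change above removes the stray space that older clang-format inserted before the closing bracket of a lambda init-capture. A self-contained sketch of the construct (the names and sizes are illustrative only):

    #include <cstddef>
    #include <iostream>
    #include <string>

    int main() {
        // Older clang-format printed "[&, errorSize = size_t(0) ]";
        // newer releases drop the space before ']'.
        std::size_t total = 0;
        auto errorMessage = [&, errorSize = std::size_t(0)](const std::string& raw) mutable {
            errorSize += raw.size();
            total = errorSize;
        };
        errorMessage("example");
        std::cout << total << "\n";
        return 0;
    }
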
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index c375ffdd73f..95c6771badf 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -52,8 +52,8 @@ public:
void makeKClientsWithLockers(int k) {
clients.reserve(k);
for (int i = 0; i < k; ++i) {
- auto client = getGlobalServiceContext()->makeClient(
- str::stream() << "test client for thread " << i);
+ auto client = getGlobalServiceContext()->makeClient(str::stream()
+ << "test client for thread " << i);
auto opCtx = client->makeOperationContext();
opCtx->swapLockState(std::make_unique<LockerImpl>());
clients.emplace_back(std::move(client), std::move(opCtx));
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 735b162dcaf..a8d93e8e0ee 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -2100,7 +2100,7 @@ public:
bool activeTransaction = true;
};
-}
+} // namespace
TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonsSnapshotWhenNotInWriteUnitOfWork) {
auto clients = makeKClientsWithLockers(1);
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index e3c7fc77809..5dcbfa07f28 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -102,7 +102,10 @@ uint32_t modeMask(LockMode mode) {
* Maps the LockRequest status to a human-readable string.
*/
static const char* LockRequestStatusNames[] = {
- "new", "granted", "waiting", "converting",
+ "new",
+ "granted",
+ "waiting",
+ "converting",
};
// Ensure we do not add new status types without updating the names array
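
The LockRequestStatusNames rewrite above reflects how clang-format treats a braced initializer that ends in a trailing comma: bin-packing is disabled and each element gets its own line. Keeping or dropping that final comma therefore acts as a per-list formatting switch:

    // With the trailing comma, each element stays on its own line;
    // without it, clang-format may pack the strings onto one line.
    static const char* const kStatusNames[] = {
        "new",
        "granted",
        "waiting",
        "converting",
    };
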
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index ab113b48aad..50b2116d953 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -60,32 +60,32 @@ public:
~LockManager();
/**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Locking the same resource twice increments the reference count of the lock so each call
- * to lock must be matched with a call to unlock with the same resource.
- *
- * @param resId Id of the resource to be locked.
- * @param request LockRequest structure on which the state of the request will be tracked.
- * This value cannot be NULL and the notify value must be set. If the
- * return value is not LOCK_WAITING, this pointer can be freed and will
- * not be used any more.
- *
- * If the return value is LOCK_WAITING, the notification method will be called
- * at some point into the future, when the lock becomes granted. If unlock is
- * called before the lock becomes granted, the notification will not be
- * invoked.
- *
- * If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notify method has been invoked or unlock has
- * been called for the resource it was assigned to. Failure to do so will
- * cause the lock manager to call into an invalid memory location.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- *
- * @return See comments for LockResult.
- */
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Locking the same resource twice increments the reference count of the lock so each call
+ * to lock must be matched with a call to unlock with the same resource.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param request LockRequest structure on which the state of the request will be tracked.
+ * This value cannot be NULL and the notify value must be set. If the
+ * return value is not LOCK_WAITING, this pointer can be freed and will
+ * not be used any more.
+ *
+ * If the return value is LOCK_WAITING, the notification method will be called
+ * at some point into the future, when the lock becomes granted. If unlock is
+ * called before the lock becomes granted, the notification will not be
+ * invoked.
+ *
+ * If the return value is LOCK_WAITING, the notification object *must*
+ * live at least until the notify method has been invoked or unlock has
+ * been called for the resource it was assigned to. Failure to do so will
+ * cause the lock manager to call into an invalid memory location.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ *
+ * @return See comments for LockResult.
+ */
LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
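
The large lock_manager.h hunk above changes only whitespace: the Doxygen block had drifted left and is re-indented so its '*' column lines up with the opening delimiter at class-member depth. In miniature:

    class LockManager {
    public:
        /**
         * Re-indented so each '*' sits one column past the comment opener,
         * at the same depth as the member declaration it documents.
         */
        void lock();
    };
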
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 44557163c75..315a7389e04 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -328,8 +328,7 @@ void LockerImpl::reacquireTicket(OperationContext* opCtx) {
} else {
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
- << "' within a max lock request timeout of '"
- << *_maxLockTimeout
+ << "' within a max lock request timeout of '" << *_maxLockTimeout
<< "' milliseconds.",
_acquireTicket(opCtx, _modeForTicket, Date_t::now() + *_maxLockTimeout));
}
@@ -369,8 +368,7 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
<< "' within a max lock request timeout of '"
- << Date_t::now() - beforeAcquire
- << "' milliseconds.",
+ << Date_t::now() - beforeAcquire << "' milliseconds.",
_acquireTicket(opCtx, mode, deadline));
}
_modeForTicket = mode;
@@ -966,9 +964,9 @@ void LockerImpl::lockComplete(OperationContext* opCtx,
// Check if the lock acquisition has timed out. If we have an operation context and client
// we can provide additional diagnostics data.
if (waitTime == Milliseconds(0)) {
- std::string timeoutMessage = str::stream() << "Unable to acquire " << modeName(mode)
- << " lock on '" << resId.toString()
- << "' within " << timeout << ".";
+ std::string timeoutMessage = str::stream()
+ << "Unable to acquire " << modeName(mode) << " lock on '" << resId.toString()
+ << "' within " << timeout << ".";
if (opCtx && opCtx->getClient()) {
timeoutMessage = str::stream()
<< timeoutMessage << " opId: " << opCtx->getOpID()
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index d9330472c30..4c2d841d28d 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -945,11 +945,12 @@ namespace {
bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
const ResourceId& resourceId,
const LockMode& mode) {
- return (1U == std::count_if(lockerInfo.locks.begin(),
- lockerInfo.locks.end(),
- [&resourceId, &mode](const Locker::OneLock& lock) {
- return lock.resourceId == resourceId && lock.mode == mode;
- }));
+ return (1U ==
+ std::count_if(lockerInfo.locks.begin(),
+ lockerInfo.locks.end(),
+ [&resourceId, &mode](const Locker::OneLock& lock) {
+ return lock.resourceId == resourceId && lock.mode == mode;
+ }));
}
} // namespace
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index c36b382b584..9eb18f8d349 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -48,10 +48,10 @@ WriteConflictException::WriteConflictException()
}
void WriteConflictException::logAndBackoff(int attempt, StringData operation, StringData ns) {
- mongo::logAndBackoff(
- ::mongo::logger::LogComponent::kWrite,
- logger::LogSeverity::Debug(1),
- static_cast<size_t>(attempt),
- str::stream() << "Caught WriteConflictException doing " << operation << " on " << ns);
-}
+ mongo::logAndBackoff(::mongo::logger::LogComponent::kWrite,
+ logger::LogSeverity::Debug(1),
+ static_cast<size_t>(attempt),
+ str::stream() << "Caught WriteConflictException doing " << operation
+ << " on " << ns);
}
+} // namespace mongo
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index ce9e60af38d..a7eb5519471 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -65,7 +65,14 @@ namespace {
// OP_QUERY find. The $orderby field is omitted because "orderby" (no dollar sign) is also allowed,
// and this requires special handling.
const std::vector<const char*> kDollarQueryModifiers = {
- "$hint", "$comment", "$max", "$min", "$returnKey", "$showDiskLoc", "$snapshot", "$maxTimeMS",
+ "$hint",
+ "$comment",
+ "$max",
+ "$min",
+ "$returnKey",
+ "$showDiskLoc",
+ "$snapshot",
+ "$maxTimeMS",
};
} // namespace
diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp
index b5f9b9e9a36..6afbfb05be5 100644
--- a/src/mongo/db/curop_failpoint_helpers.cpp
+++ b/src/mongo/db/curop_failpoint_helpers.cpp
@@ -85,4 +85,4 @@ void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint,
updateCurOpMsg(opCtx, origCurOpMsg);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/curop_failpoint_helpers.h b/src/mongo/db/curop_failpoint_helpers.h
index e642f601811..a1143805951 100644
--- a/src/mongo/db/curop_failpoint_helpers.h
+++ b/src/mongo/db/curop_failpoint_helpers.h
@@ -64,4 +64,4 @@ public:
bool checkForInterrupt = false,
boost::optional<NamespaceString> nss = boost::none);
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 37082607dba..0f0a3c3123f 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -1008,8 +1008,8 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
if (auto svcExec = serviceContext->getServiceExecutor()) {
Status status = svcExec->shutdown(Seconds(10));
if (!status.isOK()) {
- log(LogComponent::kNetwork) << "Service executor failed to shutdown within timelimit: "
- << status.reason();
+ log(LogComponent::kNetwork)
+ << "Service executor failed to shutdown within timelimit: " << status.reason();
}
}
#endif
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 8cf2455629b..c851fcfcc9c 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -122,8 +122,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString());
}
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 0b0671b4ecf..d5ce4367612 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index e9bfdf9dff1..630c0093a91 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -59,9 +59,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::string;
+using std::unique_ptr;
/* fetch a single object from collection ns that matches query
set your db SavedContext first
diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
index 75f2ecb8fbc..e7b63168e46 100644
--- a/src/mongo/db/dbmessage.cpp
+++ b/src/mongo/db/dbmessage.cpp
@@ -153,7 +153,7 @@ Message makeMessage(NetworkOp op, Func&& bodyBuilder) {
out.header().setLen(size);
return out;
}
-}
+} // namespace
Message makeInsertMessage(StringData ns, const BSONObj* objs, size_t count, int flags) {
return makeMessage(dbInsert, [&](BufBuilder& b) {
@@ -238,4 +238,4 @@ DbResponse replyToQuery(int queryResultFlags,
reply.bufBuilderForResults().appendBuf(data, size);
return DbResponse{reply.toQueryReply(queryResultFlags, nReturned, startingFrom, cursorId)};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 82a0f80a475..9002dd97607 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -96,7 +96,7 @@ class OperationContext;
namespace QueryResult {
#pragma pack(1)
/* see http://dochub.mongodb.org/core/mongowireprotocol
-*/
+ */
struct Layout {
MsgData::Layout msgdata;
int64_t cursorId;
@@ -298,7 +298,7 @@ enum QueryOptions {
QueryOption_CursorTailable = 1 << 1,
/** allow query of replica slave. normally these return an error except for namespace "local".
- */
+ */
QueryOption_SlaveOk = 1 << 2,
// findingStart mode is used to find the first operation of interest when
@@ -319,7 +319,7 @@ enum QueryOptions {
/** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while
* rather than returning no data. After a timeout period, we do return as normal.
- */
+ */
QueryOption_AwaitData = 1 << 5,
/** Stream the data down full blast in multiple "more" packages, on the assumption that the
diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp
index b804e277407..73354253738 100644
--- a/src/mongo/db/dbmessage_test.cpp
+++ b/src/mongo/db/dbmessage_test.cpp
@@ -140,4 +140,4 @@ TEST(DBMessage1, GoodInsert2) {
}
-} // mongo namespace
+} // namespace mongo
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index f7fcdc170e2..6c60553a76e 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -38,8 +38,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
using std::vector;
// static
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index d4f5ae39c6f..3d6da255e53 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -92,8 +92,7 @@ BSONObj ChangeStreamProxyStage::_validateAndConvertToBSON(const Document& event)
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
return eventBSON;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index dcd07553bc8..9ebd656512d 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -128,8 +128,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
Status status(ErrorCodes::CappedPositionLost,
str::stream() << "CollectionScan died due to failure to restore "
<< "tailable cursor position. "
- << "Last seen record id: "
- << _lastSeenId);
+ << "Last seen record id: " << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::FAILURE;
}
@@ -242,8 +241,7 @@ void CollectionScan::doRestoreStateRequiresCollection() {
uassert(ErrorCodes::CappedPositionLost,
str::stream()
<< "CollectionScan died due to position in capped collection being deleted. "
- << "Last seen record id: "
- << _lastSeenId,
+ << "Last seen record id: " << _lastSeenId,
couldRestore);
}
}
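Many hunks in this file and below reflow `str::stream()` message chains so that several `<<` pieces share a line up to the column limit. For readers unfamiliar with the idiom, here is a standalone, runnable approximation; the real helper is mongo::str::stream, and `StreamSketch` is a stand-in written for this note:

#include <iostream>
#include <sstream>
#include <string>

// Approximates str::stream: an ostringstream wrapper that converts to
// std::string, so error text can be built inline inside uassert/Status.
struct StreamSketch {
    std::ostringstream ss;
    template <typename T>
    StreamSketch& operator<<(const T& v) {
        ss << v;
        return *this;
    }
    operator std::string() const {
        return ss.str();
    }
};

int main() {
    long long lastSeenId = 42;
    std::string msg = StreamSketch() << "CollectionScan died due to failure to restore "
                                     << "tailable cursor position. Last seen record id: "
                                     << lastSeenId;
    std::cout << msg << '\n';
    return 0;
}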
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index 998235ca270..55379bd5550 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -62,7 +62,7 @@ BSONObj replaceBSONFieldNames(const BSONObj& replace, const BSONObj& fieldNames)
return bob.obj();
}
-}
+} // namespace
using std::unique_ptr;
using std::vector;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index df28f3edcdb..ef1fb4e2aa9 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -95,7 +95,7 @@ struct StoredGeometry {
BSONElement element;
GeometryContainer geometry;
};
-}
+} // namespace
/**
* Find and parse all geometry elements on the appropriate field path from the document.
@@ -555,7 +555,7 @@ private:
// Owns matcher
const unique_ptr<MatchExpression> _matcher;
};
-}
+} // namespace
static double min2DBoundsIncrement(const GeoNearExpression& query,
const IndexDescriptor* twoDIndex) {
@@ -590,9 +590,9 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(nullptr);
@@ -829,7 +829,7 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) {
// Takes ownership of caps
return new S2RegionIntersection(&regions);
}
-}
+} // namespace
// Estimate the density of data by search the nearest cells level by level around center.
class GeoNear2DSphereStage::DensityEstimator {
@@ -1010,9 +1010,9 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(nullptr);
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index bd1e0637e7d..7e07bf0ff44 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -123,4 +123,4 @@ TEST_F(QueuedDataStageTest, validateStats) {
unique_ptr<PlanStageStats> allStats(mock->getStats());
ASSERT_TRUE(stats->isEOF);
}
-}
+} // namespace
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index ab601569cd4..973165969be 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -75,4 +75,4 @@ private:
CountStats _specificStats;
};
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 3d77b61870a..060722dbe14 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -61,8 +61,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// a rename has happened during yield.
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection renamed from '" << _nss << "' to '" << *newNss
- << "'. UUID "
- << _collectionUUID,
+ << "'. UUID " << _collectionUUID,
*newNss == _nss);
// At this point we know that the collection name has not changed, and therefore we have
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 39ba8540c0f..73701afe43b 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -66,8 +66,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -283,11 +283,9 @@ public:
str::stream() << "Can't find index: " << keyPatternObj,
!indexes.empty());
uassert(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " matching indexes for key pattern: "
- << keyPatternObj
- << ". Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ str::stream() << indexes.size()
+ << " matching indexes for key pattern: " << keyPatternObj
+ << ". Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj(),
indexes.size() == 1);
desc = indexes[0];
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index c945ccd2fcd..3bd267271da 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -44,9 +44,9 @@
namespace mongo {
+using std::string;
using std::unique_ptr;
using std::vector;
-using std::string;
using fts::FTSSpec;
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 85bce89da40..ea314b78ab6 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -115,8 +115,7 @@ void assertRequiredPathsPresent(const mb::Document& document, const FieldRefSet&
uassert(ErrorCodes::NoSuchKey,
str::stream() << "After applying the update, the new document was missing the "
"required field '"
- << (*path).dottedField()
- << "'",
+ << (*path).dottedField() << "'",
elem.ok());
uassert(
ErrorCodes::NotSingleValueField,
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 2f59e755c7a..1d3934443e6 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -54,5 +54,5 @@ bool ensureStillMatches(const Collection* collection,
WorkingSet* ws,
WorkingSetID id,
const CanonicalQuery* cq);
-}
-}
+} // namespace write_stage_common
+} // namespace mongo
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
index 7adf5c74dcd..ad98dcfdc35 100644
--- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
+++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
@@ -51,9 +51,7 @@ std::unique_ptr<DBClientBase> connect(StringData appName) {
void setWaitWithPinnedCursorDuringGetMoreBatchFailpoint(DBClientBase* conn, bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off")
- << "data"
+ << "mode" << (enable ? "alwaysOn" : "off") << "data"
<< BSON("shouldNotdropLock" << true));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
@@ -63,8 +61,7 @@ void setWaitBeforeUnpinningOrDeletingCursorAfterGetMoreBatchFailpoint(DBClientBa
bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off"));
+ << "mode" << (enable ? "alwaysOn" : "off"));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
}
@@ -158,12 +155,9 @@ TEST(CurrentOpExhaustCursorTest, CanSeeEachExhaustCursorPseudoGetMoreInCurrentOp
// Generate a currentOp filter based on the cursorId and the cumulative nDocsReturned.
const auto curOpMatch = BSON("command.collection"
<< "exhaust_cursor_currentop"
- << "command.getMore"
- << queryCursor->getCursorId()
- << "msg"
+ << "command.getMore" << queryCursor->getCursorId() << "msg"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "cursor.nDocsReturned"
- << i);
+ << "cursor.nDocsReturned" << i);
// Confirm that the exhaust getMore appears in the $currentOp output.
ASSERT(confirmCurrentOpContents(conn.get(), curOpMatch, parallelWaitTimeoutMS));
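The failpoint hunks above show the other pattern visible throughout this patch: in the `BSON(...)` stream macro, a field name and a non-string value now share a line, while a bare string-literal value still starts its own line after its key. A hedged sketch of the same shape; the failpoint and field names are copied from the hunk above, and `enable` stands in for the helper's parameter:

bool enable = true;  // stand-in for the helper's parameter
BSONObj cmdObj = BSON("configureFailPoint"
                      << "waitWithPinnedCursorDuringGetMoreBatch"
                      << "mode" << (enable ? "alwaysOn" : "off")
                      << "data" << BSON("shouldNotdropLock" << true));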
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index a99ee1b45e8..dfb0b7f0d7a 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -79,9 +79,7 @@ protected:
valLong = 1LL;
doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
- << aString(valString)
- << anOID(valOID)
- << aLong(valLong));
+ << aString(valString) << anOID(valOID) << aLong(valLong));
}
void tearDown() {}
@@ -215,9 +213,10 @@ TEST(ComplexExtraction, GetStringVector) {
BSONField<vector<string>> vectorField("testVector");
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
BSONObj obj = bob.obj();
vector<string> parsedVector;
@@ -268,9 +267,10 @@ TEST(ComplexExtraction, RoundTripVector) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
obj = bob.obj();
}
@@ -297,12 +297,13 @@ TEST(ComplexExtraction, GetStringMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -319,14 +320,15 @@ TEST(ComplexExtraction, GetObjectMap) {
BSONField<map<string, BSONObj>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << BSON("a"
- << "a")
- << "b"
- << BSON("b"
- << "b")
- << "c"
- << BSON("c"
- << "c"));
+ bob << mapField()
+ << BSON("a" << BSON("a"
+ << "a")
+ << "b"
+ << BSON("b"
+ << "b")
+ << "c"
+ << BSON("c"
+ << "c"));
BSONObj obj = bob.obj();
map<string, BSONObj> parsedMap;
@@ -349,12 +351,11 @@ TEST(ComplexExtraction, GetBadMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << 123
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b" << 123 << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -371,12 +372,13 @@ TEST(ComplexExtraction, RoundTripMap) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
obj = bob.obj();
}
@@ -432,9 +434,7 @@ TEST(ComplexExtraction, GetBadNestedMap) {
BSONObj nestedMapObj = BSON("a"
<< "a"
- << "b"
- << 123
- << "c"
+ << "b" << 123 << "c"
<< "c");
BSONObjBuilder bob;
diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp
index 2843f39d655..79741af83d3 100644
--- a/src/mongo/db/field_ref_set.cpp
+++ b/src/mongo/db/field_ref_set.cpp
@@ -36,8 +36,8 @@
namespace mongo {
-using std::vector;
using std::string;
+using std::vector;
namespace {
@@ -52,7 +52,7 @@ StringData safeFirstPart(const FieldRef* fieldRef) {
return fieldRef->getPart(0);
}
}
-}
+} // namespace
bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const {
return *l < *r;
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 92e1edab444..9307ab7570c 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -157,33 +157,33 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
- * Initial state. Either start() or stop() can be called next.
- */
+ * Initial state. Either start() or stop() can be called next.
+ */
kNotStarted,
/**
- * start() has been called. stop() should be called next.
- */
+ * start() has been called. stop() should be called next.
+ */
kStarted,
/**
- * stop() has been called, and the background thread is in progress of shutting down
- */
+ * stop() has been called, and the background thread is in the process of shutting down
+ */
kStopRequested,
/**
- * Controller has been stopped.
- */
+ * Controller has been stopped.
+ */
kDone,
};
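The re-indented block above documents a four-state lifecycle, including the top arrow that lets a never-started controller jump straight to kDone. A self-contained illustration of a guard that matches the diagram; ControllerSketch and its asserts are invented for this note, not the controller's actual code:

#include <cassert>

class ControllerSketch {
public:
    enum class State { kNotStarted, kStarted, kStopRequested, kDone };

    void start() {
        assert(_state == State::kNotStarted);  // start() is only legal first
        _state = State::kStarted;
    }

    void stop() {
        if (_state == State::kNotStarted) {
            _state = State::kDone;  // the diagram's direct kNotStarted -> kDone arrow
            return;
        }
        assert(_state == State::kStarted);
        _state = State::kStopRequested;
        // ... a real controller would join its background thread here ...
        _state = State::kDone;
    }

private:
    State _state = State::kNotStarted;
};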
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 0ad7a139dc6..090eed90cc9 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -44,7 +44,6 @@
#include "mongo/base/deinitializer_context.h"
#include "mongo/bson/bson_validate.h"
#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/client.h"
#include "mongo/db/free_mon/free_mon_op_observer.h"
@@ -119,8 +118,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
@@ -248,10 +247,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doRegister(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doRegister(req); });
});
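This hunk reflows a C++14 init-capture: the older formatter output padded the capture list, as in `[ sharedPromise = std::move(pf.promise), req, this ](`, and the newer output removes the interior spaces. A standalone, runnable illustration of moving a promise into a task the same way:

#include <future>
#include <utility>

int main() {
    std::promise<int> p;
    auto fut = p.get_future();
    // Init-capture moves the promise into the lambda; the formatter now
    // writes [promise = std::move(p)] with no spaces inside the brackets.
    // mutable is required because set_value() mutates the captured promise.
    auto task = [promise = std::move(p)]() mutable { promise.set_value(42); };
    task();
    return fut.get() == 42 ? 0 : 1;
}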
@@ -295,10 +293,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doMetrics(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doMetrics(req); });
});
@@ -543,8 +540,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// max reporting interval
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -555,8 +551,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 30 * 60 * 60 * 24LL))));
+ << "reportingInterval" << 30 * 60 * 60 * 24LL))));
// Positive: version 2
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -567,8 +562,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Positive: empty registration id string
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -579,8 +573,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: bad protocol version
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -591,8 +584,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: halt uploading
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -603,8 +595,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -614,20 +605,16 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "id"
<< "mock123"
- << "informationalURL"
- << std::string(5000, 'b')
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -636,10 +623,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
// Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -650,8 +634,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -662,39 +645,36 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
// Positive: Ensure the response is validated correctly
TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Support version 2
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 2LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 2LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Add resendRegistration
ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -707,10 +687,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL
- << "resendRegistration"
- << true))));
+ << "reportingInterval" << 1LL << "resendRegistration" << true))));
// Positive: max reporting interval
@@ -724,89 +701,74 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 60 * 60 * 24 * 30LL))));
+ << "reportingInterval" << 60 * 60 * 24 * 30LL))));
// Negative: bad protocol version
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 42LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+ // Negative: halt uploading
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 1LL << "haltMetricsUploading" << true
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+ // Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 42LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
+ BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
+ << "id" << std::string(5000, 'a') << "informationalURL"
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: halt uploading
+ // Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << true << "permanentlyDelete" << false
- << "id"
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+
+ << "permanentlyDelete" << false << "id"
<< "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: large registartation id
+ // Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
<< "id"
- << std::string(5000, 'a')
+ << "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
- // Negative: large URL
+ // Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false
-
- << "permanentlyDelete"
- << false
- << "id"
+ << "permanentlyDelete" << false << "id"
<< "mock123"
<< "informationalURL"
- << std::string(5000, 'b')
+ << "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
-
- // Negative: large message
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
-
- // Negative: too small a reporting interval
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -818,8 +780,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
/**
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 55b3091c34a..71a34dd84b4 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -67,8 +67,8 @@ enum class FreeMonMessageType {
AsyncRegisterFail,
/**
- * Unregister server from server command.
- */
+ * Unregister server from server command.
+ */
UnregisterCommand,
/**
@@ -117,24 +117,24 @@ enum class FreeMonMessageType {
*/
enum class RegistrationType {
/**
- * Do not register on start because it was not configured via commandline/config file.
- */
+ * Do not register on start because it was not configured via commandline/config file.
+ */
DoNotRegister,
/**
- * Register immediately on start since we are a standalone.
- */
+ * Register immediately on start since we are a standalone.
+ */
RegisterOnStart,
/**
- * Register after transition to becoming primary because we are in a replica set,
- * and Free Monitoring has been explicitly enabled.
- */
+ * Register after transition to becoming primary because we are in a replica set,
+ * and Free Monitoring has been explicitly enabled.
+ */
RegisterAfterOnTransitionToPrimary,
/**
- * As above, but only if we have been runtime enabled.
- */
+ * As above, but only if we have been runtime enabled.
+ */
RegisterAfterOnTransitionToPrimaryIfEnabled,
};
@@ -334,7 +334,7 @@ private:
/**
* For the messages that the caller needs to wait on, this provides a mechanism to wait on messages
* to be processed.
-*/
+ */
template <FreeMonMessageType typeT>
struct FreeMonWaitablePayloadForMessage {
using payload_type = void;
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index 008790e289a..fbf9255ddce 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -105,7 +105,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/register", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -128,7 +127,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/metrics", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -152,7 +150,7 @@ private:
std::string url(FreeMonEndpointURL + path.toString());
auto status = _executor->scheduleWork(
- [ promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this ](
+ [promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
ConstDataRange cdr(data->data(), data->size());
try {
@@ -202,28 +200,11 @@ public:
// Try to filter server status to make it cheaper to collect. Harmless if we gather
// extra
BSON("serverStatus" << 1 << "storageEngine" << true << "extra_info" << false
- << "opLatencies"
- << false
- << "opcountersRepl"
- << false
- << "opcounters"
- << false
- << "transactions"
- << false
- << "connections"
- << false
- << "network"
- << false
- << "tcMalloc"
- << false
- << "network"
- << false
- << "wiredTiger"
- << false
- << "sharding"
- << false
- << "metrics"
- << false)) {}
+ << "opLatencies" << false << "opcountersRepl" << false
+ << "opcounters" << false << "transactions" << false
+ << "connections" << false << "network" << false << "tcMalloc"
+ << false << "network" << false << "wiredTiger" << false
+ << "sharding" << false << "metrics" << false)) {}
std::string name() const final {
return "storageEngine";
diff --git a/src/mongo/db/free_mon/free_mon_op_observer.cpp b/src/mongo/db/free_mon/free_mon_op_observer.cpp
index 09bfb3ff62c..29e380c8baa 100644
--- a/src/mongo/db/free_mon/free_mon_op_observer.cpp
+++ b/src/mongo/db/free_mon/free_mon_op_observer.cpp
@@ -42,8 +42,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
const auto getFreeMonDeleteState = OperationContext::declareDecoration<bool>();
diff --git a/src/mongo/db/free_mon/free_mon_options.h b/src/mongo/db/free_mon/free_mon_options.h
index 60203dc2b94..19f707e8b65 100644
--- a/src/mongo/db/free_mon/free_mon_options.h
+++ b/src/mongo/db/free_mon/free_mon_options.h
@@ -35,8 +35,8 @@
namespace mongo {
/**
-* Free Moniting Command line choices
-*/
+ * Free Monitoring command-line choices
+ */
enum class EnableCloudStateEnum : std::int32_t {
kOn,
kOff,
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index 7013d72e244..8cb57bda42f 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -465,36 +465,29 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream()
<< "Unexpected registration response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion << "), received '"
+ << resp.getVersion() << "'");
}
if (resp.getId().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "InformationURL is '" << resp.getInformationalURL().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().size() >= kInformationalMessageMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -502,19 +495,15 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
@@ -540,30 +529,24 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
if (!(resp.getVersion() >= kMinProtocolVersion && resp.getVersion() <= kMaxProtocolVersion)) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Unexpected metrics response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion
+ << "), received '" << resp.getVersion() << "'");
}
if (resp.getId().is_initialized() && resp.getId().get().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().is_initialized() &&
resp.getInformationalURL().get().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "InformationURL is '"
- << resp.getInformationalURL().get().size()
- << "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ str::stream()
+ << "InformationURL is '" << resp.getInformationalURL().get().size()
+ << "' bytes in length, maximum allowed length is '"
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().is_initialized() &&
@@ -571,8 +554,7 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -580,19 +562,15 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
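The validation hunks above all share one shape: range-check a response field and, on failure, return a permanent-failure Status whose message is built inline. A compact in-tree-style sketch under the assumption that the constants mirror kReportingIntervalSecondsMin/Max; the helper name is invented, and the bounds are taken from the tests (1 second accepted, 0 rejected, 30 days accepted, 30 days + 1 rejected):

Status validateReportingInterval(long long interval) {
    constexpr long long kMin = 1;                    // assumed stand-in for the real constant
    constexpr long long kMax = 60LL * 60 * 24 * 30;  // 30 days in seconds, as in the tests
    if (interval < kMin || interval > kMax) {
        return Status(ErrorCodes::FreeMonHttpPermanentFailure,
                      str::stream() << "Reporting Interval '" << interval
                                    << "' must be in the range [" << kMin << ","
                                    << kMax << "]");
    }
    return Status::OK();
}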
diff --git a/src/mongo/db/free_mon/free_mon_queue_test.cpp b/src/mongo/db/free_mon/free_mon_queue_test.cpp
index ea38c7bad5c..ad6104c5126 100644
--- a/src/mongo/db/free_mon/free_mon_queue_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue_test.cpp
@@ -146,13 +146,11 @@ TEST_F(FreeMonQueueTest, TestQueueStop) {
auto swSchedule =
_mockThreadPool->scheduleWork([&](const executor::TaskExecutor::CallbackArgs& cbArgs) {
-
barrier.countDownAndWait();
// Try to dequeue from a stopped task queue
auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource());
ASSERT_FALSE(item.is_initialized());
-
});
ASSERT_OK(swSchedule.getStatus());
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index 143a6c4b391..509504037b7 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -70,18 +70,12 @@ TEST_F(FTDCCompressorTest, TestBasic) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42),
+ << "key1" << 33 << "key2" << 42),
Date_t());
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t());
ASSERT_HAS_SPACE(st);
@@ -190,112 +184,64 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_HAS_SPACE(st);
// Rename field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Change type
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47 << "key7" << 34 << "key9"
+ << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Remove Field
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_HAS_SPACE(st);
// Start new batch
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 5));
+ << "key7" << 5));
ASSERT_SCHEMA_CHANGED(st);
// Change field to object
@@ -309,22 +255,19 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
// Change field from object to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 7));
+ << "key7" << 7));
ASSERT_SCHEMA_CHANGED(st);
// Change field from number to array
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << BSON_ARRAY(13 << 17)));
+ << "key7" << BSON_ARRAY(13 << 17)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from array to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 19));
+ << "key7" << 19));
ASSERT_SCHEMA_CHANGED(st);
@@ -351,13 +294,11 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
auto st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 42));
+ << "int1" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 45));
+ << "int1" << 45));
ASSERT_HAS_SPACE(st);
// Add string field
@@ -365,8 +306,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int1"
- << 47));
+ << "int1" << 47));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -374,41 +314,34 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int2"
- << 48));
+ << "int2" << 48));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 49));
+ << "int2" << 49));
ASSERT_HAS_SPACE(st);
// Add string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 50
- << "str3"
+ << "int2" << 50 << "str3"
<< "bar"));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 51
- << "str3"
+ << "int1" << 51 << "str3"
<< "bar"));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 52));
+ << "int1" << 52));
ASSERT_HAS_SPACE(st);
@@ -419,8 +352,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int1"
- << 53));
+ << "int1" << 53));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -430,15 +362,13 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int2"
- << 54));
+ << "int2" << 54));
ASSERT_SCHEMA_CHANGED(st);
// Remove 2 string fields
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 55));
+ << "int2" << 55));
ASSERT_HAS_SPACE(st);
// Change string to number
@@ -448,8 +378,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
// Change number to string
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 67));
+ << "int1" << 67));
ASSERT_SCHEMA_CHANGED(st);
}
@@ -459,24 +388,15 @@ TEST_F(FTDCCompressorTest, TestNumbersCompat) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -500,50 +420,35 @@ TEST_F(FTDCCompressorTest, Types) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
const char bytes[] = {0x1, 0x2, 0x3};
- BSONObj o = BSON("created" << DATENOW // date_t
- << "null"
- << BSONNULL // { a : null }
- << "undefined"
- << BSONUndefined // { a : undefined }
+ BSONObj o = BSON("created" << DATENOW // date_t
+ << "null" << BSONNULL // { a : null }
+ << "undefined" << BSONUndefined // { a : undefined }
<< "obj"
<< BSON( // nested object
"a"
<< "abc"
- << "b"
- << 123LL)
+ << "b" << 123LL)
<< "foo"
<< BSON_ARRAY("bar"
<< "baz"
- << "qux") // array of strings
- << "foo2"
- << BSON_ARRAY(5 << 6 << 7) // array of ints
- << "bindata"
- << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
- << "oid"
- << OID("010203040506070809101112") // oid
- << "bool"
- << true // bool
- << "regex"
- << BSONRegEx("mongodb") // regex
- << "ref"
- << BSONDBRef("c", OID("010203040506070809101112")) // ref
- << "code"
- << BSONCode("func f() { return 1; }") // code
+ << "qux") // array of strings
+ << "foo2" << BSON_ARRAY(5 << 6 << 7) // array of ints
+ << "bindata" << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
+ << "oid" << OID("010203040506070809101112") // oid
+ << "bool" << true // bool
+ << "regex" << BSONRegEx("mongodb") // regex
+ << "ref" << BSONDBRef("c", OID("010203040506070809101112")) // ref
+ << "code" << BSONCode("func f() { return 1; }") // code
<< "codewscope"
<< BSONCodeWScope("func f() { return 1; }",
BSON("c" << true)) // codew
- << "minkey"
- << MINKEY // minkey
- << "maxkey"
- << MAXKEY // maxkey
- );
+ << "minkey" << MINKEY // minkey
+ << "maxkey" << MAXKEY // maxkey
+ );
st = c.addSample(o);
ASSERT_SCHEMA_CHANGED(st);
@@ -553,17 +458,11 @@ TEST_F(FTDCCompressorTest, Types) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -575,37 +474,25 @@ TEST_F(FTDCCompressorTest, TestFull) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
for (size_t i = 0; i != FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_FULL(st);
// Add Value
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
}
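Every ASSERT_SCHEMA_CHANGED above is provoked by adding, removing, renaming, or retyping a field between samples. A hedged, in-tree-style sketch of the top-level comparison those tests rely on; sameTopLevelSchema is invented for this note, and the real compressor also descends into subobjects and arrays:

bool sameTopLevelSchema(const mongo::BSONObj& a, const mongo::BSONObj& b) {
    mongo::BSONObjIterator ia(a), ib(b);
    while (ia.more() && ib.more()) {
        mongo::BSONElement ea = ia.next();
        mongo::BSONElement eb = ib.next();
        // A rename changes the field name; a type change (e.g. 45 -> "45")
        // changes the BSON type byte; either one resets the schema.
        if (ea.fieldNameStringData() != eb.fieldNameStringData() || ea.type() != eb.type())
            return false;
    }
    return !ia.more() && !ib.more();  // an added or removed field also mismatches
}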
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 26d76b28ad7..5d1f2f5487a 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -150,14 +150,14 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
* Initial state. Either start() or stop() can be called next.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index a2a4c9b8abc..43872f1ccd5 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -119,8 +119,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index cdc7dcdb125..39ef2e133f7 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -76,8 +76,8 @@ StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
boost::filesystem::create_directories(dir, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << dir.generic_string() << "\" could not be created: "
- << ec.message()};
+ str::stream() << "\"" << dir.generic_string()
+ << "\" could not be created: " << ec.message()};
}
}
@@ -233,9 +233,9 @@ Status FTDCFileManager::trimDirectory(std::vector<boost::filesystem::path>& file
boost::filesystem::remove(*it, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << (*it).generic_string()
- << "\" could not be removed during trimming: "
- << ec.message()};
+ str::stream()
+ << "\"" << (*it).generic_string()
+ << "\" could not be removed during trimming: " << ec.message()};
}
}
}
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 13493e984b7..a6cf6c6b38c 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -72,45 +72,34 @@ TEST_F(FTDCFileManagerTest, TestFull) {
// Test a large numbers of zeros, and incremental numbers in a full buffer
for (int j = 0; j < 10; j++) {
- ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << 3230792343LL
- << "key2"
- << 235135),
- Date_t()));
+ ASSERT_OK(
+ mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1" << 3230792343LL << "key2" << 235135),
+ Date_t()));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
- ASSERT_OK(
- mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
- << static_cast<long long int>(i *
- (645 << j))),
- Date_t()));
+ ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(
+ client,
+ BSON("name"
+ << "joe"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << static_cast<long long int>(i * (645 << j))),
+ Date_t()));
}
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -175,9 +164,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -187,9 +174,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -197,20 +182,14 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -245,9 +224,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -257,9 +234,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -267,20 +242,14 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -311,23 +280,14 @@ TEST_F(FTDCFileManagerTest, TestNormalCrashInterim) {
BSONObj mdoc1 = BSON("name"
<< "some_metadata"
- << "key1"
- << 34
- << "something"
- << 98);
+ << "key1" << 34 << "something" << 98);
BSONObj sdoc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj sdoc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
boost::filesystem::path fileOut;
diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp
index 23d468aac0f..b71257e4278 100644
--- a/src/mongo/db/ftdc/file_reader.cpp
+++ b/src/mongo/db/ftdc/file_reader.cpp
@@ -195,8 +195,7 @@ StatusWith<BSONObj> FTDCFileReader::readDocument() {
if (readSize != _stream.gcount()) {
return {ErrorCodes::FileStreamFailed,
str::stream() << "Failed to read " << readSize << " bytes from file \""
- << _file.generic_string()
- << "\""};
+ << _file.generic_string() << "\""};
}
ConstDataRange cdr(_buffer.data(), _buffer.data() + bsonLength);
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index be4ea127b3b..24a6bf4cb17 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -210,8 +210,7 @@ Status FTDCFileWriter::flush(const boost::optional<ConstDataRange>& range, Date_
if (ec) {
return {ErrorCodes::NonExistentPath,
str::stream() << "\"" << _interimFile.generic_string()
- << "\" could not be removed during flush: "
- << ec.message()};
+ << "\" could not be removed during flush: " << ec.message()};
}
return Status::OK();
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index b8f6dbd9f70..16118eec6ce 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -60,16 +60,10 @@ TEST_F(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -111,16 +105,10 @@ TEST_F(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -216,69 +204,41 @@ TEST_F(FTDCFileTest, TestSchemaChanges) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
// Rename field
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
// Change type
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
// RemoveField
c.addSample(BSON("name"
<< "joe"
<< "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
}
// Test a full buffer
@@ -289,34 +249,22 @@ TEST_F(FTDCFileTest, TestFull) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
}
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
}
}
diff --git a/src/mongo/db/ftdc/ftdc_server.cpp b/src/mongo/db/ftdc/ftdc_server.cpp
index 92c2ca4fe2a..fc081316701 100644
--- a/src/mongo/db/ftdc/ftdc_server.cpp
+++ b/src/mongo/db/ftdc/ftdc_server.cpp
@@ -34,7 +34,6 @@
#include <boost/filesystem.hpp>
#include <fstream>
#include <memory>
-#include <memory>
#include "mongo/base/status.h"
#include "mongo/bson/bsonobjbuilder.h"
diff --git a/src/mongo/db/ftdc/ftdc_system_stats.h b/src/mongo/db/ftdc/ftdc_system_stats.h
index b5886fea819..bdc2e87984c 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats.h
+++ b/src/mongo/db/ftdc/ftdc_system_stats.h
@@ -33,7 +33,6 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/ftdc/controller.h"
-#include "mongo/db/ftdc/controller.h"
namespace mongo {
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
index 0338f5b7f17..08fedbb8e6b 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
@@ -68,7 +68,10 @@ static const std::vector<StringData> kMemKeys{
};
static const std::vector<StringData> kNetstatKeys{
- "Tcp:"_sd, "Ip:"_sd, "TcpExt:"_sd, "IpExt:"_sd,
+ "Tcp:"_sd,
+ "Ip:"_sd,
+ "TcpExt:"_sd,
+ "IpExt:"_sd,
};
/**
diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp
index f745068fdea..9500bf62ecb 100644
--- a/src/mongo/db/ftdc/util.cpp
+++ b/src/mongo/db/ftdc/util.cpp
@@ -444,9 +444,7 @@ StatusWith<FTDCType> getBSONDocumentType(const BSONObj& obj) {
static_cast<FTDCType>(value) != FTDCType::kMetadata) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << std::string(kFTDCTypeField)
- << "' is not an expected value, found '"
- << value
- << "'"};
+ << "' is not an expected value, found '" << value << "'"};
}
return {static_cast<FTDCType>(value)};
diff --git a/src/mongo/db/ftdc/util.h b/src/mongo/db/ftdc/util.h
index 87defea80ea..4d47c610559 100644
--- a/src/mongo/db/ftdc/util.h
+++ b/src/mongo/db/ftdc/util.h
@@ -45,23 +45,23 @@ namespace mongo {
namespace FTDCBSONUtil {
/**
-* Type of FTDC document.
-*
-* NOTE: Persisted to disk via BSON Objects.
-*/
+ * Type of FTDC document.
+ *
+ * NOTE: Persisted to disk via BSON Objects.
+ */
enum class FTDCType : std::int32_t {
/**
- * A metadata document is composed of a header + an array of bson documents
- *
- * See createBSONMetadataChunkDocument
- */
+ * A metadata document is composed of a header + an array of bson documents
+ *
+ * See createBSONMetadataChunkDocument
+ */
kMetadata = 0,
/**
- * A metrics chunk is composed of a header + a compressed metric chunk.
- *
- * See createBSONMetricChunkDocument
- */
+ * A metrics chunk is composed of a header + a compressed metric chunk.
+ *
+ * See createBSONMetricChunkDocument
+ */
kMetricChunk = 1,
};
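
Given the two enum values above, a consumer of persisted FTDC documents branches on the type field. A minimal sketch (reusing getBSONDocumentType() from util.cpp above; the case comments paraphrase the enum docs, and the helper name is illustrative):

    #include "mongo/db/ftdc/util.h"
    #include "mongo/util/assert_util.h"

    // Sketch: dispatch on the persisted FTDC type field.
    void dispatchFTDCDoc(const mongo::BSONObj& doc) {
        using mongo::FTDCBSONUtil::FTDCType;
        switch (mongo::uassertStatusOK(mongo::FTDCBSONUtil::getBSONDocumentType(doc))) {
            case FTDCType::kMetadata:
                // header + array of BSON documents
                break;
            case FTDCType::kMetricChunk:
                // header + compressed metric chunk
                break;
        }
    }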
diff --git a/src/mongo/db/ftdc/varint.h b/src/mongo/db/ftdc/varint.h
index 08a064de2b4..66a4b30cab7 100644
--- a/src/mongo/db/ftdc/varint.h
+++ b/src/mongo/db/ftdc/varint.h
@@ -46,8 +46,8 @@ namespace mongo {
*/
struct FTDCVarInt {
/**
- * Maximum number of bytes an integer can compress to
- */
+ * Maximum number of bytes an integer can compress to
+ */
static const std::size_t kMaxSizeBytes64 = 10;
FTDCVarInt() = default;
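
The 10-byte bound follows from base-128 varint encoding: each output byte carries 7 payload bits, so a 64-bit integer needs at most ceil(64 / 7) = 10 bytes. A minimal standalone sketch of the scheme (not the actual FTDC encoder):

    #include <cstddef>
    #include <cstdint>

    // Minimal base-128 varint encoder sketch: low 7 bits per byte, high bit
    // set while more bytes follow. A value with all 64 bits set emits 10
    // bytes, matching kMaxSizeBytes64.
    std::size_t encodeVarint64(std::uint64_t value, std::uint8_t* out) {
        std::size_t n = 0;
        while (value >= 0x80) {
            out[n++] = static_cast<std::uint8_t>(value) | 0x80;
            value >>= 7;
        }
        out[n++] = static_cast<std::uint8_t>(value);
        return n;  // at most 10 for 64-bit inputs
    }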
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index ebca711dd2b..c9666f0834a 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -64,7 +64,7 @@ inline bool _matchPrefix(const string& dottedName, const string& weight) {
}
return str::startsWith(weight, dottedName + '.');
}
-}
+} // namespace
bool FTSElementIterator::more() {
//_currentValue = advance();
@@ -113,9 +113,10 @@ FTSIteratorValue FTSElementIterator::advance() {
// 1. parent path empty (top level): use the current field name
// 2. parent path non-empty and obj is an array: use the parent path
// 3. parent path non-empty and obj is a sub-doc: append field name to parent path
- string dottedName = (_frame._parentPath.empty() ? fieldName : _frame._isArray
- ? _frame._parentPath
- : _frame._parentPath + '.' + fieldName);
+ string dottedName =
+ (_frame._parentPath.empty()
+ ? fieldName
+ : _frame._isArray ? _frame._parentPath : _frame._parentPath + '.' + fieldName);
// Find lower bound of dottedName in _weights. lower_bound leaves us at the first
// weight that could possibly match or be a prefix of dottedName. And if this
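
The reformatted ternary above implements the three-case rule spelled out in the comment; as a standalone sketch (illustrative names), the same logic reads:

    #include <string>

    // Dotted-name rule from FTSElementIterator::advance():
    //   parent path empty             -> field name        ("title")
    //   parent, obj is an array       -> parent path       ("tags", not "tags.0")
    //   parent, obj is a sub-document -> parent.field      ("author.name")
    std::string dottedName(const std::string& parentPath,
                           bool isArray,
                           const std::string& fieldName) {
        if (parentPath.empty())
            return fieldName;
        return isArray ? parentPath : parentPath + '.' + fieldName;
    }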
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index d9fda7efa14..ab9950635fb 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -117,8 +117,8 @@ BSONElement extractNonFTSKeyElement(const BSONObj& obj, StringData path) {
dps::extractAllElementsAlongPath(
obj, path, indexedElements, expandArrayOnTrailingField, &arrayComponents);
uassert(ErrorCodes::CannotBuildIndexKeys,
- str::stream() << "Field '" << path << "' of text index contains an array in document: "
- << obj,
+ str::stream() << "Field '" << path
+ << "' of text index contains an array in document: " << obj,
arrayComponents.empty());
// Since there aren't any arrays, there cannot be more than one extracted element on 'path'.
@@ -241,5 +241,5 @@ void FTSIndexFormat::_appendIndexKey(BSONObjBuilder& b,
b.append("", weight);
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h
index cff73d5caad..dd83e8603a8 100644
--- a/src/mongo/db/fts/fts_index_format.h
+++ b/src/mongo/db/fts/fts_index_format.h
@@ -70,5 +70,5 @@ private:
const std::string& term,
TextIndexVersion textIndexVersion);
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index b847d16dd9d..c9d6779e639 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -68,14 +68,12 @@ TEST(FTSIndexFormat, Simple1) {
TEST(FTSIndexFormat, ExtraBack1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1)))));
+ << "x" << 1)))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -94,8 +92,7 @@ TEST(FTSIndexFormat, ExtraFront1) {
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -158,8 +155,7 @@ void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet
TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 1))));
+ << "textIndexVersion" << 1))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -188,8 +184,7 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 2))));
+ << "textIndexVersion" << 2))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -222,8 +217,7 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
TEST(FTSIndexFormat, LongWordTextIndexVersion3) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 3))));
+ << "textIndexVersion" << 3))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index 7d1fdd160de..ad88ccc18d4 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -82,7 +82,7 @@ LanguageMap languageMapV2;
// Case-sensitive by lookup key.
typedef std::map<StringData, const FTSLanguage*> LanguageMapLegacy;
LanguageMapLegacy languageMapV1;
-}
+} // namespace
MONGO_INITIALIZER_GROUP(FTSAllLanguagesRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS);
@@ -277,10 +277,10 @@ StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion te
if (it == languageMap->end()) {
// TEXT_INDEX_VERSION_2 and above reject unrecognized language strings.
- Status status = Status(ErrorCodes::BadValue,
- str::stream() << "unsupported language: \"" << langName
- << "\" for text index version "
- << textIndexVersion);
+ Status status =
+ Status(ErrorCodes::BadValue,
+ str::stream() << "unsupported language: \"" << langName
+ << "\" for text index version " << textIndexVersion);
return StatusWithFTSLanguage(status);
}
@@ -312,5 +312,5 @@ std::unique_ptr<FTSTokenizer> UnicodeFTSLanguage::createTokenizer() const {
const FTSPhraseMatcher& UnicodeFTSLanguage::getPhraseMatcher() const {
return _unicodePhraseMatcher;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index 47a6ab2213d..8bdcd1aa5ce 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -168,5 +168,5 @@ private:
extern BasicFTSLanguage languagePorterV1;
extern BasicFTSLanguage languageEnglishV2;
extern BasicFTSLanguage languageFrenchV2;
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index e229bbdf0bc..29166d88319 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -175,5 +175,5 @@ TEST(FTSLanguageV1, Empty) {
ASSERT(swl.getStatus().isOK());
ASSERT_EQUALS(swl.getValue()->str(), "none");
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index e14a14d4464..be9daa5801d 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -176,5 +176,5 @@ FTSTokenizer::Options FTSMatcher::_getTokenizerOptions() const {
return tokenizerOptions;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.h b/src/mongo/db/fts/fts_matcher.h
index 5dbcc981109..660194a9585 100644
--- a/src/mongo/db/fts/fts_matcher.h
+++ b/src/mongo/db/fts/fts_matcher.h
@@ -112,5 +112,5 @@ private:
const FTSQueryImpl _query;
const FTSSpec _spec;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher_test.cpp b/src/mongo/db/fts/fts_matcher_test.cpp
index 31f05cf2268..46c292ce55a 100644
--- a/src/mongo/db/fts/fts_matcher_test.cpp
+++ b/src/mongo/db/fts/fts_matcher_test.cpp
@@ -278,5 +278,5 @@ TEST(FTSMatcher, NegativePhrasesMatchWithCase) {
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"n R\""));
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John\" -\"Running\""));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp
index c9f7e151b9d..a60ee888e66 100644
--- a/src/mongo/db/fts/fts_query_impl.cpp
+++ b/src/mongo/db/fts/fts_query_impl.cpp
@@ -204,5 +204,5 @@ BSONObj FTSQueryImpl::toBSON() const {
bob.append("negatedPhrases", getNegatedPhr());
return bob.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.h b/src/mongo/db/fts/fts_query_impl.h
index d399ee73763..97cdb8388df 100644
--- a/src/mongo/db/fts/fts_query_impl.h
+++ b/src/mongo/db/fts/fts_query_impl.h
@@ -84,5 +84,5 @@ private:
std::vector<std::string> _negatedPhrases;
std::set<std::string> _termsForBounds;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index d458004b0a5..b3b4cad71f1 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -478,5 +478,5 @@ TEST(FTSQueryImpl, CloneParsedQuery) {
ASSERT(castedClone->getNegatedPhr() == q.getNegatedPhr());
ASSERT(castedClone->getTermsForBounds() == q.getTermsForBounds());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.cpp b/src/mongo/db/fts/fts_query_parser.cpp
index a346e03451b..c6038be4575 100644
--- a/src/mongo/db/fts/fts_query_parser.cpp
+++ b/src/mongo/db/fts/fts_query_parser.cpp
@@ -102,5 +102,5 @@ QueryToken::Type FTSQueryParser::getType(char c) const {
return QueryToken::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.h b/src/mongo/db/fts/fts_query_parser.h
index f4bab3e7e1c..4f11799337c 100644
--- a/src/mongo/db/fts/fts_query_parser.h
+++ b/src/mongo/db/fts/fts_query_parser.h
@@ -84,5 +84,5 @@ private:
bool _previousWhiteSpace;
const StringData _raw;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 20560ccdad5..c358ba4b679 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -59,9 +59,9 @@ const std::string moduleDefaultLanguage("english");
bool validateOverride(const string& override) {
// The override field can't be empty, can't be prefixed with a dollar sign, and
// can't contain a dot.
- return !override.empty()&& override[0] != '$' && override.find('.') == std::string::npos;
-}
+ return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
}
+} // namespace
FTSSpec::FTSSpec(const BSONObj& indexInfo) {
// indexInfo is a text index spec. Text index specs pass through fixSpec() before being
@@ -90,12 +90,8 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) {
msgasserted(17364,
str::stream() << "attempt to use unsupported textIndexVersion "
<< textIndexVersionElt.numberInt()
- << "; versions supported: "
- << TEXT_INDEX_VERSION_3
- << ", "
- << TEXT_INDEX_VERSION_2
- << ", "
- << TEXT_INDEX_VERSION_1);
+ << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
+ << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);
}
// Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
@@ -272,7 +268,7 @@ Status verifyFieldNameNotReserved(StringData s) {
return Status::OK();
}
-}
+} // namespace
StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1) {
@@ -406,9 +402,7 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << i->second};
+ << MAX_WORD_WEIGHT << ") but found: " << i->second};
}
// Verify weight refers to a valid field.
@@ -513,5 +507,5 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index 53169f5e213..1d58c1da750 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -48,7 +48,7 @@ void _addFTSStuff(BSONObjBuilder* b) {
b->append("_fts", INDEX_NAME);
b->append("_ftsx", 1);
}
-}
+} // namespace
const FTSLanguage& FTSSpec::_getLanguageToUseV1(const BSONObj& userDoc) const {
BSONElement e = userDoc[_languageOverrideField];
@@ -240,9 +240,7 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
if (kv.second <= 0 || kv.second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << kv.second};
+ << MAX_WORD_WEIGHT << ") but found: " << kv.second};
}
b.append(kv.first, kv.second);
}
@@ -303,5 +301,5 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index f715b6f05ec..047968f2541 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -184,8 +184,7 @@ TEST(FTSSpec, ScoreSingleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -204,8 +203,7 @@ TEST(FTSSpec, ScoreMultipleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -247,8 +245,7 @@ TEST(FTSSpec, ScoreRepeatWord) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -273,8 +270,7 @@ TEST(FTSSpec, Extra1) {
TEST(FTSSpec, Extra2) {
BSONObj user = BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1));
+ << "x" << 1));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
FTSSpec spec(fixed);
ASSERT_EQUALS(0U, spec.numExtraBefore());
@@ -292,8 +288,7 @@ TEST(FTSSpec, Extra3) {
ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
fixed["key"].Obj());
ASSERT_BSONOBJ_EQ(BSON("data" << 1), fixed["weights"].Obj());
@@ -520,8 +515,7 @@ TEST(FTSSpec, NestedLanguages_Wildcard) {
TEST(FTSSpec, NestedLanguages_WildcardOverride) {
BSONObj indexSpec = BSON("key" << BSON("$**"
<< "text")
- << "weights"
- << BSON("d.e.f" << 20));
+ << "weights" << BSON("d.e.f" << 20));
FTSSpec spec(assertGet(FTSSpec::fixSpec(indexSpec)));
TermFrequencyMap tfm;
@@ -598,5 +592,5 @@ TEST(FTSSpec, TextIndexLegacyLanguageRecognition) {
ASSERT_EQUALS(tfm.size(), 0U); // "the" recognized as stopword
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.cpp b/src/mongo/db/fts/fts_util.cpp
index 5ef93b16559..f9de9ae33d7 100644
--- a/src/mongo/db/fts/fts_util.cpp
+++ b/src/mongo/db/fts/fts_util.cpp
@@ -35,5 +35,5 @@ namespace fts {
const std::string INDEX_NAME = "text";
const std::string WILDCARD = "$**";
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.h b/src/mongo/db/fts/fts_util.h
index 71eebcbf5f3..90eaa9095f6 100644
--- a/src/mongo/db/fts/fts_util.h
+++ b/src/mongo/db/fts/fts_util.h
@@ -46,5 +46,5 @@ enum TextIndexVersion {
TEXT_INDEX_VERSION_2 = 2, // Index format with ASCII support and murmur hashing.
TEXT_INDEX_VERSION_3 = 3, // Current index format with basic Unicode support.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp
index 5b9fcdadc3c..2925ca0dbba 100644
--- a/src/mongo/db/fts/stemmer.cpp
+++ b/src/mongo/db/fts/stemmer.cpp
@@ -63,5 +63,5 @@ StringData Stemmer::stem(StringData word) const {
return StringData((const char*)(sb_sym), sb_stemmer_length(_stemmer));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.h b/src/mongo/db/fts/stemmer.h
index a5a15174a94..e3608071010 100644
--- a/src/mongo/db/fts/stemmer.h
+++ b/src/mongo/db/fts/stemmer.h
@@ -63,5 +63,5 @@ public:
private:
struct sb_stemmer* _stemmer;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer_test.cpp b/src/mongo/db/fts/stemmer_test.cpp
index 42c67d7f97b..be09fe34b8c 100644
--- a/src/mongo/db/fts/stemmer_test.cpp
+++ b/src/mongo/db/fts/stemmer_test.cpp
@@ -47,5 +47,5 @@ TEST(English, Caps) {
ASSERT_EQUALS("unit", s.stem("united"));
ASSERT_EQUALS("Unite", s.stem("United"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.cpp b/src/mongo/db/fts/stop_words.cpp
index 48db6836736..39be67707bc 100644
--- a/src/mongo/db/fts/stop_words.cpp
+++ b/src/mongo/db/fts/stop_words.cpp
@@ -44,7 +44,7 @@ void loadStopWordMap(StringMap<std::set<std::string>>* m);
namespace {
StringMap<std::shared_ptr<StopWords>> StopWordsMap;
StopWords empty;
-}
+} // namespace
StopWords::StopWords() {}
@@ -70,5 +70,5 @@ MONGO_INITIALIZER(StopWords)(InitializerContext* context) {
}
return Status::OK();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.h b/src/mongo/db/fts/stop_words.h
index 22835300226..6c1c1cc07e1 100644
--- a/src/mongo/db/fts/stop_words.h
+++ b/src/mongo/db/fts/stop_words.h
@@ -61,5 +61,5 @@ public:
private:
StringMap<bool> _words; // Used as a set. The values have no meaning.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words_test.cpp b/src/mongo/db/fts/stop_words_test.cpp
index 96b1e941d3b..f0fb8ec37b8 100644
--- a/src/mongo/db/fts/stop_words_test.cpp
+++ b/src/mongo/db/fts/stop_words_test.cpp
@@ -41,5 +41,5 @@ TEST(English, Basic1) {
ASSERT(englishStopWords->isStopWord("the"));
ASSERT(!englishStopWords->isStopWord("computer"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.cpp b/src/mongo/db/fts/tokenizer.cpp
index 3de9eb00689..1463dc212bf 100644
--- a/src/mongo/db/fts/tokenizer.cpp
+++ b/src/mongo/db/fts/tokenizer.cpp
@@ -132,5 +132,5 @@ Token::Type Tokenizer::_type(char c) const {
return Token::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.h b/src/mongo/db/fts/tokenizer.h
index 1a0e79d9425..426449724e8 100644
--- a/src/mongo/db/fts/tokenizer.h
+++ b/src/mongo/db/fts/tokenizer.h
@@ -70,5 +70,5 @@ private:
const StringData _raw;
bool _english;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer_test.cpp b/src/mongo/db/fts/tokenizer_test.cpp
index 9f09736587a..db61f3abc7d 100644
--- a/src/mongo/db/fts/tokenizer_test.cpp
+++ b/src/mongo/db/fts/tokenizer_test.cpp
@@ -117,5 +117,5 @@ TEST(Tokenizer, Quote1French) {
ASSERT_EQUALS("s", b.data.toString());
ASSERT_EQUALS("car", c.data.toString());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/unicode/string.cpp b/src/mongo/db/fts/unicode/string.cpp
index 201c3539d61..8b97a671d92 100644
--- a/src/mongo/db/fts/unicode/string.cpp
+++ b/src/mongo/db/fts/unicode/string.cpp
@@ -61,7 +61,7 @@ inline void appendUtf8Codepoint(char32_t codepoint, OutputIterator* outputIt) {
*(*outputIt)++ = (((codepoint >> (6 * 0)) & 0x3f) | 0x80);
}
}
-}
+} // namespace
using linenoise_utf8::copyString32to8;
using linenoise_utf8::copyString8to32;
diff --git a/src/mongo/db/fts/unicode/string_test.cpp b/src/mongo/db/fts/unicode/string_test.cpp
index 2d3a386d1ec..a2943877b28 100644
--- a/src/mongo/db/fts/unicode/string_test.cpp
+++ b/src/mongo/db/fts/unicode/string_test.cpp
@@ -66,7 +66,7 @@ auto kCaseSensitive = String::kCaseSensitive;
auto kTurkish = CaseFoldMode::kTurkish;
auto kNormal = CaseFoldMode::kNormal;
-}
+} // namespace
// Macro to preserve line numbers and arguments in error messages.
diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp
index 4f76faac823..e68191c7ff5 100644
--- a/src/mongo/db/geo/big_polygon.cpp
+++ b/src/mongo/db/geo/big_polygon.cpp
@@ -228,4 +228,4 @@ bool BigSimplePolygon::Decode(Decoder* const decoder) {
bool BigSimplePolygon::DecodeWithinScope(Decoder* const decoder) {
MONGO_UNREACHABLE;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon.h b/src/mongo/db/geo/big_polygon.h
index bc0e4ce75f1..6df8d3e4fd9 100644
--- a/src/mongo/db/geo/big_polygon.h
+++ b/src/mongo/db/geo/big_polygon.h
@@ -115,4 +115,4 @@ private:
mutable std::unique_ptr<S2Polyline> _borderLine;
mutable std::unique_ptr<S2Polygon> _borderPoly;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index b29b7c3eb4a..2a42706906d 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -36,8 +36,8 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
// Helper to build a vector of S2Point
@@ -81,8 +81,7 @@ typedef PointBuilder points;
TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 square centered at [0,0]
S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
@@ -95,8 +94,7 @@ TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,20]
BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0)
- << LatLng(-10.0, 30.0)));
+ << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
@@ -108,18 +106,15 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// A 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(holePoly));
@@ -127,8 +122,7 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(holePoly));
ASSERT_TRUE(bigPoly24.Intersects(holePoly));
@@ -139,12 +133,10 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -152,24 +144,21 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
// A 16X16 square centered at [0,0] containing the shell
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(shellPoly));
ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
// Try a big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(shellPoly));
ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
// Try a big polygon smaller than the shell.
BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0)
- << LatLng(-4.0, 4.0)));
+ << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly8.Contains(shellPoly));
ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
@@ -178,8 +167,7 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
TEST(BigSimplePolygon, BasicComplement) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 square centered at [0,0]
@@ -192,8 +180,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 10x10 square centered at [20,20], contained by bigPoly20Comp
S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0)
- << LatLng(15.0, 25.0)));
+ << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
ASSERT(bigPoly20Comp.Contains(poly10Contained));
@@ -202,8 +189,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
// which is not allowed by S2.
S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
@@ -212,8 +198,7 @@ TEST(BigSimplePolygon, BasicComplement) {
TEST(BigSimplePolygon, BasicIntersects) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20.Invert();
// A 10x10 square centered at [10,10] (partial overlap)
@@ -228,19 +213,16 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// 1. BigPolygon doesn't touch holePoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
@@ -249,8 +231,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 2. BigPolygon intersects holePoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
@@ -259,8 +240,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 3. BigPolygon contains holePoly
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
@@ -268,9 +248,9 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 4. BigPolygon contains the right half of holePoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(holePoly));
@@ -282,12 +262,10 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -296,8 +274,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 1. BigPolygon doesn't touch shellPoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
@@ -306,8 +283,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 2. BigPolygon intersects shellPoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
@@ -316,8 +292,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 3. BigPolygon contains shellPoly's outer ring
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
@@ -325,9 +300,9 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 4. BigPolygon contains the right half of shellPoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(shellPoly));
@@ -335,8 +310,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 5. BigPolygon contains shellPoly (CW)
BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0)
- << LatLng(-6.0, 6.0)));
+ << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
@@ -345,13 +319,11 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
TEST(BigSimplePolygon, BasicWinding) {
// A 20x20 square centered at [0,0] (CCW)
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Everything *not* in a 20x20 square centered at [0,0] (CW)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
@@ -360,13 +332,11 @@ TEST(BigSimplePolygon, BasicWinding) {
TEST(BigSimplePolygon, LineRelations) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT(bigPoly20.Contains(line10));
@@ -386,14 +356,12 @@ TEST(BigSimplePolygon, LineRelations) {
TEST(BigSimplePolygon, LineRelationsComplement) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -406,8 +374,7 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
// A 30x30 line circling [0,0]
S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_TRUE(bigPoly20Comp.Contains(line30));
ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
}
@@ -415,13 +382,11 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
TEST(BigSimplePolygon, LineRelationsWinding) {
// Everything *not* in a 20x20 square centered at [0,0] (CW winding)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -431,13 +396,11 @@ TEST(BigSimplePolygon, LineRelationsWinding) {
TEST(BigSimplePolygon, PolarContains) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0]
S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0)
- << LatLng(85.0, -90.0)));
+ << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
@@ -448,8 +411,7 @@ TEST(BigSimplePolygon, PolarContains) {
TEST(BigSimplePolygon, PolarContainsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
// north pole
@@ -468,8 +430,7 @@ TEST(BigSimplePolygon, PolarContainsWithHoles) {
TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
vector<S2Loop*> loops;
@@ -512,8 +473,7 @@ void checkConsistency(const BigSimplePolygon& bigPoly,
TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Vertex point and collinear point
@@ -522,12 +482,10 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Polygon shares one edge
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0)
- << LatLng(80.0, -10.0)));
+ << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0)
- << LatLng(50.0, -10.0)));
+ << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
// Line
S2Polyline line(
@@ -538,12 +496,9 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, point);
@@ -571,18 +526,15 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Polygon
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0)
- << LatLng(80.0, 10.0)));
+ << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0)
- << LatLng(50.0, 10.0)));
+ << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
// Line
S2Polyline line(
pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
@@ -592,12 +544,9 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, poly);
@@ -616,4 +565,4 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
checkConsistency(bigPoly, expandedBigPoly, line);
checkConsistency(bigPoly, expandedBigPoly, collinearLine);
}
-}
+} // namespace
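
Throughout these tests, 2 * M_PI is the solid angle of a hemisphere: the unit sphere has total area 4*pi, so GetArea() < 2*pi identifies a polygon covering less than half the sphere, while an inverted ("everything *not* in ...") polygon covers more. A one-line sanity check of that convention:

    #include <cassert>
    #include <cmath>

    // A polygon and its complement partition the unit sphere (area 4*pi),
    // so at most one of the two sides can exceed the hemisphere bound 2*pi.
    void checkComplementAreas(double polyArea) {
        const double complementArea = 4.0 * M_PI - polyArea;
        assert(!(polyArea > 2.0 * M_PI && complementArea > 2.0 * M_PI));
    }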
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 3db8485eb6e..1d8fef041ce 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -230,8 +230,7 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Secondary loops not contained by first exterior loop - "
"secondary loops must be holes: "
<< coordinateElt.toString(false)
- << " first loop: "
- << elem.Obj().firstElement().toString(false));
+ << " first loop: " << elem.Obj().firstElement().toString(false));
}
}
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index 921ba70e6d6..01eba23667a 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -434,4 +434,4 @@ TEST(GeoParser, parseGeometryCollection) {
ASSERT_TRUE(gc.supportsContains());
}
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index 761f8c79213..f8ebed9a7da 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -667,19 +667,13 @@ Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
if (params->bits < 1 || params->bits > 32) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but "
- << params->bits
- << " bits were specified");
+ << "but " << params->bits << " bits were specified");
}
if (params->min >= params->max) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "region for hash must be valid and have positive area, "
- << "but ["
- << params->min
- << ", "
- << params->max
- << "] "
+ << "but [" << params->min << ", " << params->max << "] "
<< "was specified");
}
@@ -774,8 +768,7 @@ GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
GeoHash GeoHashConverter::hash(double x, double y) const {
uassert(16433,
str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
- << " ]"
- << causedBy(BSON_ARRAY(x << y).toString()),
+ << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
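
The 1..32 bound on bits matches the representation: one hash value per axis, with the x and y hashes interleaved bit-by-bit into a single 64-bit cell id. A sketch of the linear mapping onto a 2^bits grid (illustrative; convertToHashScale's exact rounding may differ):

    #include <cstdint>

    // Map x in [min, max) onto an unsigned 'bits'-bit grid cell. Two such
    // values (x and y) interleaved bitwise form one 64-bit hash, which is
    // why 'bits' is capped at 32 per axis.
    std::uint32_t toHashScale(double x, double min, double max, int bits) {
        const double scaled = (x - min) / (max - min);  // in [0, 1)
        return static_cast<std::uint32_t>(scaled * static_cast<double>(1ULL << bits));
    }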
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index 1681803083f..288a0895d02 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -549,4 +549,4 @@ TEST(GeoHash, ClearUnusedBitsIsNoopIfNoBitsAreUnused) {
GeoHash other = geoHash.parent(32);
ASSERT_EQUALS(geoHash, other);
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index 67ebbf37924..284350f62ab 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -332,7 +332,7 @@ void getDifferenceInternal(GeoHash cellId,
}
}
}
-}
+} // namespace
void R2CellUnion::getDifference(const R2CellUnion& cellUnion) {
std::vector<GeoHash> diffCellIds;
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index ca400eaa829..be466668110 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -64,8 +64,9 @@ inline double rad2deg(const double rad) {
inline double computeXScanDistance(double y, double maxDistDegrees) {
// TODO: this overestimates for large maxDistDegrees far from the equator
- return maxDistDegrees / std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
- cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
+ return maxDistDegrees /
+ std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
+ cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
}
bool isValidLngLat(double lng, double lat);
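
computeXScanDistance converts a span in degrees of latitude into degrees of longitude: a degree of longitude shrinks by a factor of cos(lat) away from the equator, so the scan width scales by 1/cos(lat), taking the worst (smallest) cosine over [y - maxDist, y + maxDist]. Clamping that latitude to [-89, +89] keeps the divisor away from cos(90 deg) = 0 at the poles, which is also why the TODO notes an overestimate for large distances. A worked example of the same computation:

    #include <algorithm>
    #include <cmath>

    // At latitude 60 degrees, cos(60 deg) = 0.5, so a 1-degree radius needs
    // a 2-degree longitude scan: xScan(59.0, 1.0) ~= 1.0 / cos(60 deg) = 2.0.
    double xScan(double y, double maxDistDegrees) {
        auto deg2rad = [](double d) { return d * M_PI / 180.0; };
        return maxDistDegrees /
            std::min(std::cos(deg2rad(std::min(89.0, y + maxDistDegrees))),
                     std::cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
    }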
diff --git a/src/mongo/db/hasher.h b/src/mongo/db/hasher.h
index 20519e6a58f..a4e86a1b5aa 100644
--- a/src/mongo/db/hasher.h
+++ b/src/mongo/db/hasher.h
@@ -71,4 +71,4 @@ public:
private:
BSONElementHasher();
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index 63ec64417af..fd02d21e12c 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -272,8 +272,7 @@ TEST(BSONElementHasher, HashString) {
TEST(BSONElementHasher, HashObject) {
BSONObj o = BSON("check" << BSON("a"
<< "abc"
- << "b"
- << 123LL));
+ << "b" << 123LL));
ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
o = BSON("check" << BSONObj());
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index f1c860f4cfc..7f337879d9d 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -98,9 +98,7 @@ BSONElement BtreeKeyGenerator::_extractNextElement(const BSONObj& obj,
16746,
str::stream() << "Ambiguous field name found in array (do not use numeric field names in "
"embedded elements in an array), field: '"
- << arrField.fieldName()
- << "' for array: "
- << positionalInfo.arrayObj,
+ << arrField.fieldName() << "' for array: " << positionalInfo.arrayObj,
!haveObjField || !positionalInfo.hasPositionallyIndexedElt());
*arrayNestedArray = false;
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index 3301cc3c861..da569fdb203 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -43,9 +43,9 @@
#include "mongo/util/log.h"
using namespace mongo;
-using std::unique_ptr;
using std::cout;
using std::endl;
+using std::unique_ptr;
using std::vector;
namespace {
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index e47ef01a5e6..4dc0ebbb8d9 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -193,14 +193,8 @@ void ExpressionParams::initialize2dsphereParams(const BSONObj& infoObj,
massert(17395,
stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
- << out->indexVersion
- << " }, only support versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "]",
+ << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
out->indexVersion == S2_INDEX_VERSION_1);
}
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index f308c4251cb..fc6f6067484 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -397,8 +397,8 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// other writes making up this operation are given. When index builds can cope with
// replication rollbacks, side table writes associated with a CUD operation should
// remain/rollback along with the corresponding oplog entry.
- toInsert.emplace_back(BSON(
- "op" << (op == Op::kInsert ? "i" : "d") << "key" << key << "recordId" << loc.repr()));
+ toInsert.emplace_back(BSON("op" << (op == Op::kInsert ? "i" : "d") << "key" << key
+ << "recordId" << loc.repr()));
}
if (op == Op::kInsert) {
@@ -408,9 +408,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
for (const auto& key : multikeyMetadataKeys) {
toInsert.emplace_back(BSON("op"
<< "i"
- << "key"
- << key
- << "recordId"
+ << "key" << key << "recordId"
<< static_cast<int64_t>(
RecordId::ReservedId::kWildcardMultikeyMetadataId)));
}
@@ -421,7 +419,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// operations outside this table and in the same transaction are rolled back, this counter also
// needs to be rolled back.
opCtx->recoveryUnit()->onRollback(
- [ this, size = toInsert.size() ] { _sideWritesCounter.fetchAndSubtract(size); });
+ [this, size = toInsert.size()] { _sideWritesCounter.fetchAndSubtract(size); });
std::vector<Record> records;
for (auto& doc : toInsert) {
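
The reformatted lambda above is a C++14 init-capture: the batch size is copied at registration time, so the rollback handler subtracts exactly what this write added even after toInsert goes away. A standalone sketch of the pattern (illustrative names; the handler list stands in for RecoveryUnit::onRollback):

    #include <atomic>
    #include <cstddef>
    #include <functional>
    #include <vector>

    std::atomic<long long> sideWritesCounter{0};
    std::vector<std::function<void()>> rollbackHandlers;  // stand-in for the recovery unit

    // Capture the size by value now; running the handler later undoes
    // exactly this batch's increment, mirroring fetchAndSubtract above.
    void registerBatch(std::size_t batchSize) {
        sideWritesCounter.fetch_add(static_cast<long long>(batchSize));
        rollbackHandlers.push_back(
            [size = batchSize] { sideWritesCounter.fetch_sub(static_cast<long long>(size)); });
    }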
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 18f98cc72cf..f8afcd4f56a 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -121,9 +121,9 @@ public:
bool areAllConstraintsChecked(OperationContext* opCtx) const;
/**
- * When an index builder wants to commit, use this to retrieve any recorded multikey paths
- * that were tracked during the build.
- */
+ * When an index builder wants to commit, use this to retrieve any recorded multikey paths
+ * that were tracked during the build.
+ */
boost::optional<MultikeyPaths> getMultikeyPaths() const;
const std::string& getSideWritesTableIdent() const;
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index c88a81c305b..da27ca8a2e5 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -63,7 +63,7 @@ void populateOptionsMap(std::map<StringData, BSONElement>& theMap, const BSONObj
fieldName == IndexDescriptor::kDropDuplicatesFieldName || // this is now ignored
fieldName == IndexDescriptor::kSparseFieldName || // checked specially
fieldName == IndexDescriptor::kUniqueFieldName // checked specially
- ) {
+ ) {
continue;
}
theMap[fieldName] = e;
@@ -152,8 +152,7 @@ Status IndexDescriptor::isIndexVersionAllowedForCreation(
}
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
- << "; cannot create an index with v="
- << static_cast<int>(indexVersion)};
+ << "; cannot create an index with v=" << static_cast<int>(indexVersion)};
}
IndexVersion IndexDescriptor::getDefaultIndexVersion() {
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 3f9ac46d57e..6881641b23b 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -97,30 +97,18 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
if (!indexVersionElt.isNumber()) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
if (indexVersionElt.type() == BSONType::NumberDouble &&
!std::isnormal(indexVersionElt.numberDouble())) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
const auto indexVersion = indexVersionElt.numberLong();
@@ -128,15 +116,9 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
indexVersion != S2_INDEX_VERSION_3) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
return specObj;
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index b57a2b58c43..93fc8ac545d 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -99,8 +99,7 @@ void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths,
const MultikeyPaths& actualMultikeyPaths) {
if (expectedMultikeyPaths != actualMultikeyPaths) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
}
@@ -109,13 +108,11 @@ long long getCellID(int x, int y, bool multiPoint = false) {
if (multiPoint) {
obj = BSON("a" << BSON("type"
<< "MultiPoint"
- << "coordinates"
- << BSON_ARRAY(BSON_ARRAY(x << y))));
+ << "coordinates" << BSON_ARRAY(BSON_ARRAY(x << y))));
} else {
obj = BSON("a" << BSON("type"
<< "Point"
- << "coordinates"
- << BSON_ARRAY(x << y)));
+ << "coordinates" << BSON_ARRAY(x << y)));
}
BSONObj keyPattern = fromjson("{a: '2dsphere'}");
BSONObj infoObj = fromjson("{key: {a: '2dsphere'}, '2dsphereIndexVersion': 3}");
@@ -244,8 +241,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)));
+ << "" << getCellID(0, 0)));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
@@ -267,9 +263,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)
- << ""
+ << "" << getCellID(0, 0) << ""
<< "2gnirts"));
assertKeysetsEqual(expectedKeys, actualKeys);
@@ -389,8 +383,9 @@ TEST(S2KeyGeneratorTest, CollationAppliedToStringsInNestedObjects) {
ExpressionKeysPrivate::getS2Keys(obj, keyPattern, params, &actualKeys, &actualMultikeyPaths);
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
- expectedKeys.insert(BSON("" << getCellID(0, 0) << "" << BSON("c"
- << "gnirts")));
+ expectedKeys.insert(BSON("" << getCellID(0, 0) << ""
+ << BSON("c"
+ << "gnirts")));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index 7801c911bd9..e7dd4ec822f 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -150,8 +150,7 @@ DEATH_TEST(SortKeyGeneratorTest,
MONGO_COMPILER_VARIABLE_UNUSED auto ignored =
std::make_unique<SortKeyGenerator>(BSON("a" << BSON("$meta"
<< "textScore"
- << "extra"
- << 1)),
+ << "extra" << 1)),
nullptr);
}
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index ba2fc769a25..11eeeea971c 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -114,4 +114,4 @@ private:
std::string _name; // name of this builder, not related to the index
static AtomicWord<unsigned> _indexBuildCount;
};
-}
+} // namespace mongo
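
The many bare `}` to `}  // namespace mongo` edits throughout this patch come from the formatter's namespace-comment fixing; the convention it enforces, in miniature:

    namespace mongo {
    namespace {

    // File-local helpers live here.

    }  // namespace
    }  // namespace mongo
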
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 49100c3a2e3..2de2ff6622f 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -85,8 +85,7 @@ void checkShardKeyRestrictions(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -163,9 +162,9 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
for (auto& spec : specs) {
std::string name = spec.getStringField(IndexDescriptor::kIndexNameFieldName);
if (name.empty()) {
- return Status(
- ErrorCodes::CannotCreateIndex,
- str::stream() << "Cannot create an index for a spec '" << spec
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream()
+ << "Cannot create an index for a spec '" << spec
<< "' without a non-empty string value for the 'name' field");
}
indexNames.push_back(name);
@@ -369,8 +368,7 @@ void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
stdx::unique_lock<stdx::mutex> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
- << _allIndexBuilds.size()
- << " index builds running.",
+ << _allIndexBuilds.size() << " index builds running.",
_allIndexBuilds.size() == 0);
}
@@ -487,12 +485,11 @@ Status IndexBuildsCoordinator::_registerIndexBuild(
auto registeredIndexBuilds =
collIndexBuildsIt->second->getIndexBuildState(lk, name);
return Status(ErrorCodes::IndexBuildAlreadyInProgress,
- str::stream() << "There's already an index with name '" << name
- << "' being built on the collection: "
- << " ( "
- << replIndexBuildState->collectionUUID
- << " ). Index build: "
- << registeredIndexBuilds->buildUUID);
+ str::stream()
+ << "There's already an index with name '" << name
+ << "' being built on the collection: "
+ << " ( " << replIndexBuildState->collectionUUID
+ << " ). Index build: " << registeredIndexBuilds->buildUUID);
}
}
}
@@ -847,8 +844,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
}
fassert(51101,
status.withContext(str::stream() << "Index build: " << replState->buildUUID
- << "; Database: "
- << replState->dbName));
+ << "; Database: " << replState->dbName));
}
uassertStatusOK(status);
@@ -942,21 +938,13 @@ void IndexBuildsCoordinator::_buildIndex(
invariant(db,
str::stream() << "Database not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
invariant(db->getCollection(opCtx, nss),
str::stream() << "Collection not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
// Perform the third and final drain after releasing a shared lock and reacquiring an
// exclusive lock on the database.
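
Worth noting for these hunks: the entire str::stream() expression is a single argument to uassert/invariant/fassert, so the re-wraps change layout only, never the message. A minimal sketch of the shape, assuming mongo/util/assert_util.h and mongo/base/error_codes.h as in the tree (the function name here is made up):

    #include <cstddef>

    #include "mongo/base/error_codes.h"
    #include "mongo/util/assert_util.h"
    #include "mongo/util/str.h"

    namespace mongo {
    void assertNoIndexBuildsRunning(std::size_t numIndexBuilds) {
        // One expression, three arguments: error code, streamed message,
        // predicate being asserted.
        uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
                str::stream() << "cannot perform operation: there are currently "
                              << numIndexBuilds << " index builds running.",
                numIndexBuilds == 0);
    }
    }  // namespace mongo
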
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 3e5a8e9937c..c3299d97463 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -237,8 +237,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
return Status(ErrorCodes::IndexNotFound,
str::stream()
<< "Cannot set a new commit quorum on an index build in collection '"
- << nss
- << "' without providing any indexes.");
+ << nss << "' without providing any indexes.");
}
AutoGetCollectionForRead autoColl(opCtx, nss);
@@ -272,10 +271,9 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
buildState->indexNames.begin(), buildState->indexNames.end(), indexNames.begin());
if (buildState->indexNames.size() != indexNames.size() || !equal) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Provided indexes are not all being "
- << "built by the same index builder in collection '"
- << nss
- << "'.");
+ str::stream()
+ << "Provided indexes are not all being "
+ << "built by the same index builder in collection '" << nss << "'.");
}
// See if the new commit quorum is satisfiable.
diff --git a/src/mongo/db/index_builds_coordinator_mongod_test.cpp b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
index fab73dd5224..6ba35b27e6b 100644
--- a/src/mongo/db/index_builds_coordinator_mongod_test.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
@@ -96,8 +96,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 7f90cdeeaa8..7c48caa70b6 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -213,8 +213,8 @@ MONGO_INITIALIZER_GENERAL(
("default"))
(InitializerContext*) {
using logger::LogManager;
- using logger::MessageEventEphemeral;
using logger::MessageEventDetailsEncoder;
+ using logger::MessageEventEphemeral;
using logger::MessageEventWithContextEncoder;
using logger::MessageLogDomain;
using logger::RotatableFileAppender;
@@ -254,8 +254,8 @@ MONGO_INITIALIZER_GENERAL(
exists = boost::filesystem::exists(absoluteLogpath);
} catch (boost::filesystem::filesystem_error& e) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed probe for \"" << absoluteLogpath << "\": "
- << e.code().message());
+ str::stream() << "Failed probe for \"" << absoluteLogpath
+ << "\": " << e.code().message());
}
if (exists) {
@@ -276,9 +276,7 @@ MONGO_INITIALIZER_GENERAL(
return Status(ErrorCodes::FileRenameFailed,
str::stream()
<< "Could not rename preexisting log file \""
- << absoluteLogpath
- << "\" to \""
- << renameTarget
+ << absoluteLogpath << "\" to \"" << renameTarget
<< "\"; run with --logappend or manually remove file: "
<< ec.message());
}
diff --git a/src/mongo/db/initialize_server_security_state.cpp b/src/mongo/db/initialize_server_security_state.cpp
index b5d660869c4..cb9c29b63bd 100644
--- a/src/mongo/db/initialize_server_security_state.cpp
+++ b/src/mongo/db/initialize_server_security_state.cpp
@@ -64,9 +64,7 @@ bool initializeServerSecurityGlobalState(ServiceContext* service) {
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
auth::setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509"
- << saslCommandUserDBFieldName
- << "$external"
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName.toString()));
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index dc9677cc78c..52c1435e4a9 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
namespace {
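
The using-declaration reorder here (and in expression_where.cpp below) is the formatter sorting using declarations alphabetically; the sorted block reads:

    #include <memory>
    #include <ostream>
    #include <string>

    using std::endl;        // 'e' sorts first,
    using std::string;      // then 's',
    using std::unique_ptr;  // then 'u'.
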
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 67fd8b08460..43a46da79de 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -96,8 +96,7 @@ BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusi
BSONElement patElt = pat.next();
massert(16634,
str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern "
- << _pattern,
+ << " do not match those of keyPattern " << _pattern,
srcElt.fieldNameStringData() == patElt.fieldNameStringData());
newBound.append(srcElt);
}
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 45fef6a9e6f..fbb7c4e7af6 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -142,4 +142,4 @@ TEST(KeyPattern, GlobalMinMax) {
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
}
-}
+} // namespace
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index 20e3273af35..c97697aea41 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -106,10 +106,8 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
return {ErrorCodes::KeyNotFound,
str::stream() << "Cache Reader No keys found for " << _purpose
- << " that is valid for time: "
- << forThisTime.toString()
- << " with id: "
- << keyId};
+ << " that is valid for time: " << forThisTime.toString()
+ << " with id: " << keyId};
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
diff --git a/src/mongo/db/keys_collection_client.h b/src/mongo/db/keys_collection_client.h
index 54ac6fedc44..debff147f53 100644
--- a/src/mongo/db/keys_collection_client.h
+++ b/src/mongo/db/keys_collection_client.h
@@ -56,8 +56,8 @@ public:
bool useMajority) = 0;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
virtual Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) = 0;
/**
diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h
index 9ad5dbb7490..6e96d8e94ed 100644
--- a/src/mongo/db/keys_collection_client_direct.h
+++ b/src/mongo/db/keys_collection_client_direct.h
@@ -55,8 +55,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
/**
diff --git a/src/mongo/db/keys_collection_client_sharded.h b/src/mongo/db/keys_collection_client_sharded.h
index eabd0f2051d..111948e0139 100644
--- a/src/mongo/db/keys_collection_client_sharded.h
+++ b/src/mongo/db/keys_collection_client_sharded.h
@@ -49,8 +49,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
bool supportsMajorityReads() const final {
diff --git a/src/mongo/db/lasterror.cpp b/src/mongo/db/lasterror.cpp
index b9f6aaaedbd..1d919a14495 100644
--- a/src/mongo/db/lasterror.cpp
+++ b/src/mongo/db/lasterror.cpp
@@ -53,7 +53,7 @@ void appendDupKeyFields(BSONObjBuilder& builder, std::string errMsg) {
builder.append("ns", collName);
builder.append("index", indexName);
}
-}
+} // namespace
void LastError::reset(bool valid) {
*this = LastError();
diff --git a/src/mongo/db/log_process_details.cpp b/src/mongo/db/log_process_details.cpp
index 9435fc24485..8f7bd8cf5ba 100644
--- a/src/mongo/db/log_process_details.cpp
+++ b/src/mongo/db/log_process_details.cpp
@@ -82,4 +82,4 @@ void logProcessDetailsForLogRotate(ServiceContext* serviceContext) {
logProcessDetails();
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index fbd87f49421..415566094d2 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -50,7 +50,7 @@ bool lessThanOrEqualToMaxPossibleTime(LogicalTime time, uint64_t nTicks) {
return time.asTimestamp().getSecs() <= LogicalClock::kMaxSignedInt &&
time.asTimestamp().getInc() <= (LogicalClock::kMaxSignedInt - nTicks);
}
-}
+} // namespace
LogicalTime LogicalClock::getClusterTimeForReplicaSet(OperationContext* opCtx) {
if (getGlobalReplSettings().usingReplSets()) {
@@ -166,8 +166,7 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
return Status(ErrorCodes::ClusterTimeFailsRateLimiter,
str::stream() << "New cluster time, " << newTimeSecs
<< ", is too far from this node's wall clock time, "
- << wallClockSecs
- << ".");
+ << wallClockSecs << ".");
}
uassert(40484,
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index ec615a6490c..ca23c62e655 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -350,8 +350,9 @@ TEST_F(LogicalSessionCacheTest, RefreshMatrixSessionState) {
failText << " session case failed: ";
ASSERT(sessions()->has(ids[i]) == testCases[i].inCollection)
- << failText.str() << (testCases[i].inCollection ? "session wasn't in collection"
- : "session was in collection");
+ << failText.str()
+ << (testCases[i].inCollection ? "session wasn't in collection"
+ : "session was in collection");
ASSERT((service()->matchKilled(ids[i]) != nullptr) == testCases[i].killed)
<< failText.str()
<< (testCases[i].killed ? "session wasn't killed" : "session was killed");
diff --git a/src/mongo/db/logical_session_id_test.cpp b/src/mongo/db/logical_session_id_test.cpp
index fac7ab0c5c5..30a2529fec6 100644
--- a/src/mongo/db/logical_session_id_test.cpp
+++ b/src/mongo/db/logical_session_id_test.cpp
@@ -284,14 +284,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransact
LogicalSessionFromClient lsid;
lsid.setId(UUID::gen());
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- true);
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ true);
ASSERT(_opCtx->getLogicalSessionId());
ASSERT_EQ(lsid.getId(), _opCtx->getLogicalSessionId()->getId());
@@ -306,14 +306,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IsReplSetMemberOrMon
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- false,
- true),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ false,
+ true),
AssertionException,
ErrorCodes::IllegalOperation);
}
@@ -324,14 +324,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SupportsDocLockingFa
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- false),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ false),
AssertionException,
ErrorCodes::IllegalOperation);
}
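
The initializeOperationSessionInfo re-wraps above show another formatter behavior: once a call breaks, each argument gets its own line aligned to the opening parenthesis (the tree's config appears not to bin-pack arguments). A standalone sketch of that shape, with hypothetical helpers that exist only for illustration:

    // Hypothetical helpers, for shape only -- not real tree functions.
    static int aDeliberatelyLongHelperNameThatForcesTheCallToBreak() {
        return 1;
    }
    static void takeFiveArgs(int, bool, bool, bool, bool) {}

    static void caller() {
        // Once the call cannot fit on one line, every argument gets its
        // own line, aligned to the opening parenthesis.
        takeFiveArgs(aDeliberatelyLongHelperNameThatForcesTheCallToBreak(),
                     true,
                     true,
                     true,
                     false);
    }
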
diff --git a/src/mongo/db/logical_time_test.cpp b/src/mongo/db/logical_time_test.cpp
index a03497b416f..19c3d5832b5 100644
--- a/src/mongo/db/logical_time_test.cpp
+++ b/src/mongo/db/logical_time_test.cpp
@@ -28,8 +28,8 @@
*/
-#include "mongo/db/logical_time.h"
#include "mongo/bson/timestamp.h"
+#include "mongo/db/logical_time.h"
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
#include "mongo/platform/basic.h"
@@ -119,10 +119,10 @@ TEST(LogicalTime, appendAsOperationTime) {
}
TEST(LogicalTime, fromOperationTime) {
- const auto actualTime = LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
- << "Value"
- << "operationTime"
- << Timestamp(1)));
+ const auto actualTime =
+ LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
+ << "Value"
+ << "operationTime" << Timestamp(1)));
ASSERT_EQ(LogicalTime(Timestamp(1)), actualTime);
}
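
One pattern visible in this hunk: a non-string value such as Timestamp(1) now packs onto the same line as its field name, while string values still force a break after the name, as seen throughout this patch. A hedged sketch, assuming the tree's BSON macro and mongo/bson/timestamp.h:

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/bson/timestamp.h"

    mongo::BSONObj makeReplyWithOperationTime() {
        // "Value" is a string, so it breaks to its own line; Timestamp(1)
        // shares a line with "operationTime".
        return BSON("someOtherCommandParameter"
                    << "Value"
                    << "operationTime" << mongo::Timestamp(1));
    }
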
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 364ebdd68d7..649eb1a6e77 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -95,4 +95,4 @@ void MatchExpression::addDependencies(DepsTracker* deps) const {
_doAddDependencies(deps);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index c01bee6c697..e8f13a0057e 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -365,4 +365,4 @@ private:
MatchType _matchType;
std::unique_ptr<TagData> _tagData;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index c00d5899eda..8607611b9f7 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -238,4 +238,4 @@ bool SizeMatchExpression::equivalent(const MatchExpression* other) const {
// ------------------
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index dc27f2f2ef0..5f6933ad2d7 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -117,8 +117,8 @@ private:
class ElemMatchValueMatchExpression : public ArrayMatchingMatchExpression {
public:
/**
- * This constructor takes ownership of 'sub.'
- */
+ * This constructor takes ownership of 'sub.'
+ */
ElemMatchValueMatchExpression(StringData path, MatchExpression* sub);
explicit ElemMatchValueMatchExpression(StringData path);
virtual ~ElemMatchValueMatchExpression();
@@ -207,4 +207,4 @@ private:
int _size; // >= 0 real, < 0, nothing will match
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 67ee37060d7..9272148b323 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -132,8 +132,8 @@ Status GeoExpression::parseFrom(const BSONObj& obj) {
if (GeoExpression::INTERSECT == predicate) {
if (!geoContainer->supportsProject(SPHERE)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "$geoIntersect not supported with provided geometry: "
- << obj);
+ str::stream()
+ << "$geoIntersect not supported with provided geometry: " << obj);
}
geoContainer->projectInto(SPHERE);
}
@@ -218,8 +218,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "geo near accepts just one argument when querying for a GeoJSON "
- << "point. Extra field found: "
- << objIt.next());
+ << "point. Extra field found: " << objIt.next());
}
// Parse "new" near:
@@ -247,9 +246,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "invalid point in geo near query $geometry argument: "
- << embeddedObj
- << " "
- << status.reason());
+ << embeddedObj << " " << status.reason());
}
uassert(16681,
"$near requires geojson point, given " + embeddedObj.toString(),
@@ -326,16 +323,16 @@ Status GeoNearExpression::parseFrom(const BSONObj& obj) {
//
/**
-* Takes ownership of the passed-in GeoExpression.
-*/
+ * Takes ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
const GeoExpression* query,
const BSONObj& rawObj)
: LeafMatchExpression(GEO, path), _rawObj(rawObj), _query(query), _canSkipValidation(false) {}
/**
-* Takes shared ownership of the passed-in GeoExpression.
-*/
+ * Takes shared ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
std::shared_ptr<const GeoExpression> query,
const BSONObj& rawObj)
@@ -467,4 +464,4 @@ std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
}
return std::move(next);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 5d8d6744a34..4115285de18 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -182,4 +182,4 @@ TEST(ExpressionGeoTest, GeoNearNotEquivalent) {
gne2(makeGeoNearMatchExpression(query2));
ASSERT(!gne1->equivalent(gne2.get()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index ffb18cfc71b..3a56ff27a9d 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -806,4 +806,4 @@ bool BitTestMatchExpression::equivalent(const MatchExpression* other) const {
return path() == realOther->path() && myBitPositions == otherBitPositions;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index df0654e8c87..27ecb29622f 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -41,7 +41,7 @@
namespace pcrecpp {
class RE;
-} // namespace pcrecpp;
+} // namespace pcrecpp
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index 859fbe3bc98..46ea3e0d347 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -1861,4 +1861,4 @@ TEST(BitTestMatchExpression, DoesNotMatchBinaryWithBitMask) {
ASSERT(banyc.matchesSingleElement(match1["a"]));
ASSERT(banyc.matchesSingleElement(match2["a"]));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index 811c0f323ed..fac9967cf2e 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -256,8 +256,8 @@ StatusWithMatchExpression parse(const BSONObj& obj,
if (!parseExpressionMatchFunction) {
return {Status(ErrorCodes::BadValue,
- str::stream() << "unknown top level operator: "
- << e.fieldNameStringData())};
+ str::stream()
+ << "unknown top level operator: " << e.fieldNameStringData())};
}
auto parsedExpression = parseExpressionMatchFunction(
@@ -570,8 +570,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
// This checks if e is integral.
@@ -590,8 +589,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
}
@@ -636,9 +634,9 @@ StatusWithMatchExpression parseBitTest(StringData name, BSONElement e) {
auto eBinary = e.binData(eBinaryLen);
bitTestMatchExpression = std::make_unique<T>(name, eBinary, eBinaryLen);
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << name << " takes an Array, a number, or a BinData but received: " << e);
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << name << " takes an Array, a number, or a BinData but received: " << e);
}
return {std::move(bitTestMatchExpression)};
@@ -693,8 +691,7 @@ StatusWithMatchExpression parseInternalSchemaRootDocEq(
if (elem.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
str::stream() << InternalSchemaRootDocEqMatchExpression::kName
- << " must be an object, found type "
- << elem.type())};
+ << " must be an object, found type " << elem.type())};
}
auto rootDocEq =
std::make_unique<InternalSchemaRootDocEqMatchExpression>(elem.embeddedObject());
@@ -751,8 +748,7 @@ StatusWith<StringData> parseNamePlaceholder(const BSONObj& containingObject,
} else if (namePlaceholderElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
str::stream() << expressionName << " requires '" << namePlaceholderFieldName
- << "' to be a string, not "
- << namePlaceholderElem.type()};
+ << "' to be a string, not " << namePlaceholderElem.type()};
}
return {namePlaceholderElem.valueStringData()};
}
@@ -804,12 +800,9 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> parseExprWithPlaceholder(
if (placeholder && (*placeholder != expectedPlaceholder)) {
return {ErrorCodes::FailedToParse,
str::stream() << expressionName << " expected a name placeholder of "
- << expectedPlaceholder
- << ", but '"
+ << expectedPlaceholder << ", but '"
<< exprWithPlaceholderElem.fieldNameStringData()
- << "' has a mismatching placeholder '"
- << *placeholder
- << "'"};
+ << "' has a mismatching placeholder '" << *placeholder << "'"};
}
return result;
}
@@ -1249,8 +1242,7 @@ StatusWithMatchExpression parseInternalSchemaFixedArityArgument(
if (static_cast<size_t>(inputObj.nFields()) != arity) {
return {ErrorCodes::FailedToParse,
str::stream() << elem.fieldNameStringData() << " requires exactly " << arity
- << " MatchExpressions, but got "
- << inputObj.nFields()};
+ << " MatchExpressions, but got " << inputObj.nFields()};
}
// Fill out 'expressions' with all of the parsed subexpressions contained in the array,
@@ -1321,17 +1313,16 @@ StatusWithMatchExpression parseInternalSchemaBinDataSubType(StringData name, BSO
auto valueAsInt = e.parseIntegerElementToInt();
if (!valueAsInt.isOK()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Invalid numerical BinData subtype value for "
- << InternalSchemaBinDataSubTypeExpression::kName
- << ": "
- << e.number());
+ str::stream()
+ << "Invalid numerical BinData subtype value for "
+ << InternalSchemaBinDataSubTypeExpression::kName << ": " << e.number());
}
if (!isValidBinDataType(valueAsInt.getValue())) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << InternalSchemaBinDataSubTypeExpression::kName
- << " value must represent BinData subtype: "
- << valueAsInt.getValue());
+ str::stream()
+ << InternalSchemaBinDataSubTypeExpression::kName
+ << " value must represent BinData subtype: " << valueAsInt.getValue());
}
return {std::make_unique<InternalSchemaBinDataSubTypeExpression>(
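
Across these parser hunks the recurring idiom is returning a Status (or StatusWith) whose reason is built from str::stream(); a minimal sketch under the same headers (the arity check is a simplified stand-in, not the parser's real logic):

    #include <cstddef>

    #include "mongo/base/error_codes.h"
    #include "mongo/base/status_with.h"
    #include "mongo/util/str.h"

    namespace mongo {
    StatusWith<int> checkArity(int nFields, std::size_t arity) {
        if (static_cast<std::size_t>(nFields) != arity) {
            // str::stream() converts to std::string, which Status stores
            // as the error reason; the reflow only changes where that one
            // argument wraps.
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "requires exactly " << arity
                                        << " MatchExpressions, but got " << nFields);
        }
        return nFields;
    }
    }  // namespace mongo
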
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 534b20f3a1e..8ead6ff5d2b 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -198,16 +198,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
@@ -224,16 +220,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
@@ -251,17 +243,11 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -273,14 +259,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Query with DBRef fields out of order.
@@ -288,22 +270,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
BSONObj matchOutOfOrder = BSON("$db"
<< "db"
- << "$id"
- << oid
- << "$ref"
+ << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
@@ -322,19 +298,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -346,14 +316,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $id missing.
@@ -361,20 +327,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingID = BSON("$ref"
<< "coll"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "collx"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -386,14 +345,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $ref missing.
@@ -401,18 +356,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -424,14 +373,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $db only.
@@ -439,24 +384,17 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj matchDBOnly = BSON("$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -468,16 +406,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "$db"
- << "db"
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"
+ << "foo" << 12345 << "bar" << 678)))));
}
TEST(MatchExpressionParserArrayTest, All1) {
@@ -843,4 +777,4 @@ TEST(MatchExpressionParserArrayTest, AllStringCollation) {
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
ASSERT_TRUE(eqMatch->getCollator() == &collator);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index acf3ec3742b..ad76f13ff92 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -435,9 +435,7 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -446,15 +444,11 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -470,39 +464,28 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
<< "db"
<< "$ref"
<< "coll"
- << "$id"
- << oid))));
+ << "$id" << oid))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
}
@@ -511,15 +494,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidy = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -528,15 +507,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -544,15 +519,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
<< "coll"
@@ -560,9 +531,7 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
<< "colly"
@@ -570,87 +539,59 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
}
@@ -658,10 +599,7 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345))));
+ << "$id" << oid << "foo" << 12345))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
@@ -669,28 +607,19 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "collx"
- << "$id"
- << oidx
- << "foo"
- << 12345)
- << BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "foo" << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
}
TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
@@ -704,8 +633,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// second field is not $id
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$foo"
- << 1))));
+ << "$foo" << 1))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
@@ -719,8 +647,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// missing $id and $ref field
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
<< "test"
- << "foo"
- << 3))));
+ << "foo" << 3))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
}
diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp
index 17e77fa2522..e60bd62ccc0 100644
--- a/src/mongo/db/matcher/expression_parser_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_test.cpp
@@ -258,8 +258,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsNotInline) {
TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "i")));
+ << "$regex" << BSONRegEx("/myRegex/", "i")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_NOT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -267,8 +266,7 @@ TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -276,8 +274,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirstEmptyOptions) {
auto query = BSON("a" << BSON("$options"
<< ""
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
diff --git a/src/mongo/db/matcher/expression_parser_tree_test.cpp b/src/mongo/db/matcher/expression_parser_tree_test.cpp
index 0cc3a23f06a..9aa066b7cca 100644
--- a/src/mongo/db/matcher/expression_parser_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_tree_test.cpp
@@ -116,4 +116,4 @@ TEST(MatchExpressionParserLeafTest, NotRegex1) {
ASSERT(result.getValue()->matchesBSON(BSON("x"
<< "AC")));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 49fb5971739..fdf4e664595 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -63,16 +63,14 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx,
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
db);
Collection* collection = db->getCollection(opCtx, nss);
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
collection);
std::vector<const IndexDescriptor*> idxMatches;
diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp
index e9d18bb2e2e..d98efdc684a 100644
--- a/src/mongo/db/matcher/expression_text_base.cpp
+++ b/src/mongo/db/matcher/expression_text_base.cpp
@@ -60,10 +60,8 @@ void TextMatchExpressionBase::serialize(BSONObjBuilder* out) const {
const fts::FTSQuery& ftsQuery = getFTSQuery();
out->append("$text",
BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage()
- << "$caseSensitive"
- << ftsQuery.getCaseSensitive()
- << "$diacriticSensitive"
- << ftsQuery.getDiacriticSensitive()));
+ << "$caseSensitive" << ftsQuery.getCaseSensitive()
+ << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive()));
}
bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 1fb9cbc4af5..cb1eb248898 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -419,4 +419,4 @@ MatchExpression::ExpressionOptimizerFunc NotMatchExpression::getOptimizer() cons
return expression;
};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index b08ce6b03fb..78d429fd1e7 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -248,4 +248,4 @@ private:
std::unique_ptr<MatchExpression> _exp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index 33bf9352e33..f5efdd1c573 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -321,4 +321,4 @@ TEST(NorOp, Equivalent) {
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_type_test.cpp b/src/mongo/db/matcher/expression_type_test.cpp
index b4e9ed4b405..70f924cdf5f 100644
--- a/src/mongo/db/matcher/expression_type_test.cpp
+++ b/src/mongo/db/matcher/expression_type_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/matcher/expression_type.h"
#include "mongo/bson/json.h"
+#include "mongo/db/matcher/expression_type.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -314,4 +314,4 @@ TEST(InternalSchemaBinDataEncryptedTypeTest, DoesNotTraverseLeafArrays) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
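
The include swaps in this file and in logical_time_test.cpp above are the formatter sorting each include block lexicographically, so a test's subject header no longer floats to the top of its block:

    // Sorted order within the block:
    #include "mongo/bson/json.h"
    #include "mongo/db/matcher/expression_type.h"
    #include "mongo/unittest/unittest.h"
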
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index a171c10c140..f644f737bf4 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -46,9 +46,9 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
WhereMatchExpression::WhereMatchExpression(OperationContext* opCtx,
WhereParams params,
@@ -110,4 +110,4 @@ unique_ptr<MatchExpression> WhereMatchExpression::shallowClone() const {
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp
index 8372b0b49fe..3328c18ea09 100644
--- a/src/mongo/db/matcher/expression_where_noop.cpp
+++ b/src/mongo/db/matcher/expression_where_noop.cpp
@@ -54,4 +54,4 @@ std::unique_ptr<MatchExpression> WhereNoOpMatchExpression::shallowClone() const
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_with_placeholder.cpp b/src/mongo/db/matcher/expression_with_placeholder.cpp
index 408232d9f3f..4bc6ea6708f 100644
--- a/src/mongo/db/matcher/expression_with_placeholder.cpp
+++ b/src/mongo/db/matcher/expression_with_placeholder.cpp
@@ -65,11 +65,9 @@ StatusWith<boost::optional<StringData>> parseTopLevelFieldName(MatchExpression*
if (statusWithId.getValue() && placeholder != statusWithId.getValue()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected a single top-level field name, found '"
- << *placeholder
- << "' and '"
- << *statusWithId.getValue()
- << "'");
+ str::stream()
+ << "Expected a single top-level field name, found '"
+ << *placeholder << "' and '" << *statusWithId.getValue() << "'");
}
}
return placeholder;
@@ -105,8 +103,7 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> ExpressionWithPlaceholder
return Status(ErrorCodes::BadValue,
str::stream() << "The top-level field name must be an alphanumeric "
"string beginning with a lowercase letter, found '"
- << *placeholder
- << "'");
+ << *placeholder << "'");
}
}
diff --git a/src/mongo/db/matcher/match_details.cpp b/src/mongo/db/matcher/match_details.cpp
index 734ba6165e2..be9c657c3d9 100644
--- a/src/mongo/db/matcher/match_details.cpp
+++ b/src/mongo/db/matcher/match_details.cpp
@@ -68,4 +68,4 @@ string MatchDetails::toString() const {
ss << "elemMatchKey: " << (_elemMatchKey ? _elemMatchKey->c_str() : "NONE") << " ";
return ss.str();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/match_details.h b/src/mongo/db/matcher/match_details.h
index 9b364b34130..aadb5552b9f 100644
--- a/src/mongo/db/matcher/match_details.h
+++ b/src/mongo/db/matcher/match_details.h
@@ -77,4 +77,4 @@ private:
bool _elemMatchKeyRequested;
std::unique_ptr<std::string> _elemMatchKey;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index be404399189..5c5bfa55fd3 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -38,4 +38,4 @@ BSONMatchableDocument::BSONMatchableDocument(const BSONObj& obj) : _obj(obj) {
}
BSONMatchableDocument::~BSONMatchableDocument() {}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.h b/src/mongo/db/matcher/matchable.h
index b0e7a601b89..062a3f28826 100644
--- a/src/mongo/db/matcher/matchable.h
+++ b/src/mongo/db/matcher/matchable.h
@@ -48,7 +48,7 @@ public:
     * The newly returned ElementIterator is allowed to keep a pointer to path.
* So the caller of this function should make sure path is in scope until
* the ElementIterator is deallocated
- */
+ */
virtual ElementIterator* allocateIterator(const ElementPath* path) const = 0;
virtual void releaseIterator(ElementIterator* iterator) const = 0;
@@ -148,4 +148,4 @@ private:
mutable BSONElementIterator _iterator;
mutable bool _iteratorUsed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index e06412814df..beed8706527 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -359,4 +359,4 @@ ElementIterator::Context BSONElementIterator::next() {
_next.reset();
return x;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.h b/src/mongo/db/matcher/path.h
index 1c0500e104f..88d759462f5 100644
--- a/src/mongo/db/matcher/path.h
+++ b/src/mongo/db/matcher/path.h
@@ -260,4 +260,4 @@ private:
std::unique_ptr<ElementIterator> _subCursor;
std::unique_ptr<ElementPath> _subCursorPath;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path_accepting_keyword_test.cpp b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
index 2a69e76afcf..52b42e5d959 100644
--- a/src/mongo/db/matcher/path_accepting_keyword_test.cpp
+++ b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
@@ -49,33 +49,42 @@ TEST(PathAcceptingKeyword, CanParseKnownMatchTypes) {
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$in" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_EQUAL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$ne" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::SIZE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$size" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::SIZE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$size" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::ALL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$all" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_IN ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$nin" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::EXISTS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$exists" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::EXISTS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$exists" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::MOD ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$mod" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::TYPE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$type" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::REGEX == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$regex" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::OPTIONS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$options" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::TYPE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$type" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::REGEX ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$regex" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::OPTIONS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$options" << 1).firstElement()));
ASSERT_TRUE(
PathAcceptingKeyword::ELEM_MATCH ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$elemMatch" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$near" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoNear" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$within" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoWithin" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$near" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoNear" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$within" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoWithin" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::GEO_INTERSECTS ==
MatchExpressionParser::parsePathAcceptingKeyword(
BSON("$geoIntersects" << 1).firstElement()));
diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp
index af7856d366a..dd0d7314ca9 100644
--- a/src/mongo/db/matcher/path_test.cpp
+++ b/src/mongo/db/matcher/path_test.cpp
@@ -566,4 +566,4 @@ TEST(SingleElementElementIterator, Simple1) {
ASSERT(!i.more());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
index 342f958679b..232335afb1b 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm <= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm <= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
index 0c379a61b23..06388abd044 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm >= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm >= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
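
Both getComparator() hunks above fold a C++14 init-capture lambda onto a single line. A standalone sketch of that idiom, with illustrative names not taken from the MongoDB headers:

    #include <functional>
    #include <iostream>

    using Validator = std::function<bool(int)>;

    class MaxLengthExpression {
    public:
        explicit MaxLengthExpression(int strLen) : _strLen(strLen) {}

        int strLen() const {
            return _strLen;
        }

        Validator getComparator() const {
            // C++14 init-capture: evaluate strLen() once and copy the result
            // into the closure, the exact shape the hunks above reflow.
            return [strLen = strLen()](int lenWithoutNullTerm) {
                return lenWithoutNullTerm <= strLen;
            };
        }

    private:
        int _strLen;
    };

    int main() {
        auto validator = MaxLengthExpression(5).getComparator();
        std::cout << validator(3) << ' ' << validator(9) << '\n';  // prints: 1 0
    }
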
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
index 13bc5c47f1c..8eb9332aed7 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
@@ -80,8 +80,7 @@ TEST(InternalSchemaObjectMatchExpression, AcceptsObjectsThatMatch) {
<< "string"))));
ASSERT_TRUE(objMatch.matchesBSON(BSON("a" << BSON("b"
<< "string"
- << "c"
- << 1))));
+ << "c" << 1))));
ASSERT_FALSE(
objMatch.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1) << BSON("b"
<< "string")))));
diff --git a/src/mongo/db/matcher/schema/json_pointer_test.cpp b/src/mongo/db/matcher/schema/json_pointer_test.cpp
index aed92b17784..f91d8888719 100644
--- a/src/mongo/db/matcher/schema/json_pointer_test.cpp
+++ b/src/mongo/db/matcher/schema/json_pointer_test.cpp
@@ -49,9 +49,8 @@ void assertPointerEvaluatesTo(std::string pointerStr,
}
TEST(JSONPointerTest, ParseInterestingCharacterFields) {
- BSONObj obj = BSON(
- "" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l" << 6 << " "
- << 7);
+ BSONObj obj = BSON("" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l"
+ << 6 << " " << 7);
assertPointerEvaluatesTo("/", obj, "", 1);
assertPointerEvaluatesTo("/c%d", obj, "c%d", 2);
assertPointerEvaluatesTo("/e^f", obj, "e^f", 3);
@@ -129,9 +128,8 @@ TEST(JSONPointerTest, ArrayTraversalTest) {
<< "value2")
<< BSON("builder3"
<< "value3"));
- auto topLevel =
- BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop << "toBSONArray"
- << bsonArray);
+ auto topLevel = BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop
+ << "toBSONArray" << bsonArray);
assertPointerEvaluatesTo("/transit/arrBottom/0", topLevel, "0", 0);
assertPointerEvaluatesTo("/toBSONArray/0/builder0", topLevel, "builder0", "value0");
assertPointerEvaluatesTo("/toBSONArray/3/builder3", topLevel, "builder3", "value3");
diff --git a/src/mongo/db/matcher/schema/json_schema_parser.cpp b/src/mongo/db/matcher/schema/json_schema_parser.cpp
index dd51115e276..0608c2389b4 100644
--- a/src/mongo/db/matcher/schema/json_schema_parser.cpp
+++ b/src/mongo/db/matcher/schema/json_schema_parser.cpp
@@ -74,7 +74,12 @@ using findBSONTypeAliasFun = std::function<boost::optional<BSONType>(StringData)
// Explicitly unsupported JSON Schema keywords.
const std::set<StringData> unsupportedKeywords{
- "$ref"_sd, "$schema"_sd, "default"_sd, "definitions"_sd, "format"_sd, "id"_sd,
+ "$ref"_sd,
+ "$schema"_sd,
+ "default"_sd,
+ "definitions"_sd,
+ "format"_sd,
+ "id"_sd,
};
constexpr StringData kNamePlaceholder = "i"_sd;
@@ -176,9 +181,9 @@ StatusWithMatchExpression parseMaximum(StringData path,
bool isExclusiveMaximum) {
if (!maximum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMaximumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -204,9 +209,9 @@ StatusWithMatchExpression parseMinimum(StringData path,
bool isExclusiveMinimum) {
if (!minimum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMinimumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -252,9 +257,9 @@ StatusWithMatchExpression parsePattern(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (pattern.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternKeyword
- << "' must be a string")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPatternKeyword
+ << "' must be a string")};
}
if (path.empty()) {
@@ -273,16 +278,16 @@ StatusWithMatchExpression parseMultipleOf(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (!multipleOf.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must be a number")};
}
if (multipleOf.numberDecimal().isNegative() || multipleOf.numberDecimal().isZero()) {
return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must have a positive value")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must have a positive value")};
}
if (path.empty()) {
return {std::make_unique<AlwaysTrueMatchExpression>()};
@@ -407,7 +412,7 @@ StatusWith<StringDataSet> parseRequired(BSONElement requiredElt) {
<< propertyName.type()};
}
- const auto[it, didInsert] = properties.insert(propertyName.valueStringData());
+ const auto [it, didInsert] = properties.insert(propertyName.valueStringData());
if (!didInsert) {
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '"
@@ -460,9 +465,9 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
bool ignoreUnknownKeywords) {
if (propertiesElt.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPropertiesKeyword
- << "' must be an object")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPropertiesKeyword
+ << "' must be an object")};
}
auto propertiesObj = propertiesElt.embeddedObject();
@@ -471,8 +476,7 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
if (property.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Nested schema for $jsonSchema property '"
- << property.fieldNameStringData()
- << "' must be an object"};
+ << property.fieldNameStringData() << "' must be an object"};
}
auto nestedSchemaMatch = _parse(expCtx,
@@ -534,11 +538,11 @@ StatusWith<std::vector<PatternSchema>> parsePatternProperties(
for (auto&& patternSchema : patternPropertiesElt.embeddedObject()) {
if (patternSchema.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternPropertiesKeyword
- << "' has property '"
- << patternSchema.fieldNameStringData()
- << "' which is not an object")};
+ str::stream()
+ << "$jsonSchema keyword '"
+ << JSONSchemaParser::kSchemaPatternPropertiesKeyword
+ << "' has property '" << patternSchema.fieldNameStringData()
+ << "' which is not an object")};
}
// Parse the nested schema using a placeholder as the path, since we intend on using the
@@ -841,11 +845,11 @@ StatusWith<boost::optional<long long>> parseItems(
for (auto subschema : itemsElt.embeddedObject()) {
if (subschema.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaItemsKeyword
- << "' requires that each element of the array is an "
- "object, but found a "
- << subschema.type()};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
+ << "' requires that each element of the array is an "
+ "object, but found a "
+ << subschema.type()};
}
// We want to make an ExpressionWithPlaceholder for $_internalSchemaMatchArrayIndex,
@@ -896,8 +900,7 @@ StatusWith<boost::optional<long long>> parseItems(
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
- << "' must be an array or an object, not "
- << itemsElt.type()};
+ << "' must be an array or an object, not " << itemsElt.type()};
}
return startIndexForAdditionalItems;
@@ -1268,8 +1271,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMaximumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMaximumKeyword << " is present"};
}
if (auto minimumElt = keywordMap[JSONSchemaParser::kSchemaMinimumKeyword]) {
@@ -1295,8 +1297,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMinimumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMinimumKeyword << " is present"};
}
return Status::OK();
@@ -1316,10 +1317,8 @@ Status translateEncryptionKeywords(StringMap<BSONElement>& keywordMap,
if (encryptElt && encryptMetadataElt) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaEncryptMetadataKeyword
- << "'");
+ << JSONSchemaParser::kSchemaEncryptKeyword << "' and '"
+ << JSONSchemaParser::kSchemaEncryptMetadataKeyword << "'");
}
if (encryptMetadataElt) {
@@ -1389,9 +1388,9 @@ Status validateMetadataKeywords(StringMap<BSONElement>& keywordMap) {
if (auto titleElem = keywordMap[JSONSchemaParser::kSchemaTitleKeyword]) {
if (titleElem.type() != BSONType::String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaTitleKeyword
- << "' must be of type string");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaTitleKeyword
+ << "' must be of type string");
}
}
return Status::OK();
@@ -1446,16 +1445,16 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
<< "' is not currently supported");
} else if (!ignoreUnknownKeywords) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Unknown $jsonSchema keyword: " << elt.fieldNameStringData());
}
continue;
}
if (it->second) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Duplicate $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Duplicate $jsonSchema keyword: " << elt.fieldNameStringData());
}
keywordMap[elt.fieldNameStringData()] = elt;
@@ -1472,28 +1471,24 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
if (typeElem && bsonTypeElem) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "'");
+ << JSONSchemaParser::kSchemaTypeKeyword << "' and '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "'");
} else if (typeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
} else if (bsonTypeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
}
std::unique_ptr<InternalSchemaTypeExpression> typeExpr;
@@ -1584,25 +1579,25 @@ StatusWith<MatcherTypeSet> JSONSchemaParser::parseTypeSet(
for (auto&& typeArrayEntry : typeElt.embeddedObject()) {
if (typeArrayEntry.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' array elements must be strings")};
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' array elements must be strings")};
}
if (typeArrayEntry.valueStringData() == JSONSchemaParser::kSchemaTypeInteger) {
return {ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema type '"
- << JSONSchemaParser::kSchemaTypeInteger
- << "' is not currently supported."};
+ str::stream()
+ << "$jsonSchema type '" << JSONSchemaParser::kSchemaTypeInteger
+ << "' is not currently supported."};
}
auto insertionResult = aliases.insert(typeArrayEntry.valueStringData());
if (!insertionResult.second) {
- return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' has duplicate value: "
- << typeArrayEntry.valueStringData())};
+ return {
+ Status(ErrorCodes::FailedToParse,
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' has duplicate value: " << typeArrayEntry.valueStringData())};
}
}
}
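
Most hunks in json_schema_parser.cpp only re-flow str::stream() chains that assemble error messages for Status objects. str::stream is MongoDB's small string builder; a rough standalone approximation (an assumption-laden simplification, not the real mongo/util/str.h class):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Simplified stand-in for mongo::str::stream: accumulate operator<< pieces,
    // then convert implicitly to std::string (e.g. inside a Status constructor).
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& value) {
            _ss << value;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };

    int main() {
        std::string msg = stream() << "$jsonSchema keyword '"
                                   << "maximum"
                                   << "' must be a number";
        std::cout << msg << '\n';
    }
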
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 756263a6ff5..11a75108e9c 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -417,8 +417,7 @@ Status storeMongodOptions(const moe::Environment& params) {
storageGlobalParams.syncdelay > StorageGlobalParams::kMaxSyncdelaySecs) {
return Status(ErrorCodes::BadValue,
str::stream() << "syncdelay out of allowed range (0-"
- << StorageGlobalParams::kMaxSyncdelaySecs
- << "s)");
+ << StorageGlobalParams::kMaxSyncdelaySecs << "s)");
}
}
@@ -457,9 +456,9 @@ Status storeMongodOptions(const moe::Environment& params) {
if (journalCommitIntervalMs < 1 ||
journalCommitIntervalMs > StorageGlobalParams::kMaxJournalCommitIntervalMs) {
return Status(ErrorCodes::BadValue,
- str::stream() << "--journalCommitInterval out of allowed range (1-"
- << StorageGlobalParams::kMaxJournalCommitIntervalMs
- << "ms)");
+ str::stream()
+ << "--journalCommitInterval out of allowed range (1-"
+ << StorageGlobalParams::kMaxJournalCommitIntervalMs << "ms)");
}
}
diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h
index 62f86b51611..4ed3efa0afa 100644
--- a/src/mongo/db/mongod_options.h
+++ b/src/mongo/db/mongod_options.h
@@ -84,4 +84,4 @@ Status storeMongodOptions(const moe::Environment& params);
 * Help text for storage.dbPath config option.
*/
std::string storageDBPathDescription();
-}
+} // namespace mongo
diff --git a/src/mongo/db/multi_key_path_tracker.cpp b/src/mongo/db/multi_key_path_tracker.cpp
index d1c2c1ca293..d78271932e5 100644
--- a/src/mongo/db/multi_key_path_tracker.cpp
+++ b/src/mongo/db/multi_key_path_tracker.cpp
@@ -61,8 +61,8 @@ std::string MultikeyPathTracker::dumpMultikeyPaths(const MultikeyPaths& multikey
void MultikeyPathTracker::mergeMultikeyPaths(MultikeyPaths* toMergeInto,
const MultikeyPaths& newPaths) {
invariant(toMergeInto->size() == newPaths.size(),
- str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto) << "; newPaths: "
- << dumpMultikeyPaths(newPaths));
+ str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto)
+ << "; newPaths: " << dumpMultikeyPaths(newPaths));
for (auto idx = std::size_t(0); idx < toMergeInto->size(); ++idx) {
toMergeInto->at(idx).insert(newPaths[idx].begin(), newPaths[idx].end());
}
diff --git a/src/mongo/db/multi_key_path_tracker_test.cpp b/src/mongo/db/multi_key_path_tracker_test.cpp
index 580b69519f3..9203ff5ff4a 100644
--- a/src/mongo/db/multi_key_path_tracker_test.cpp
+++ b/src/mongo/db/multi_key_path_tracker_test.cpp
@@ -47,8 +47,7 @@ void assertMultikeyPathsAreEqual(const MultikeyPaths& actual, const MultikeyPath
if (!match) {
FAIL(str::stream() << "Expected: " << MultikeyPathTracker::dumpMultikeyPaths(expected)
<< ", "
- << "Actual: "
- << MultikeyPathTracker::dumpMultikeyPaths(actual));
+ << "Actual: " << MultikeyPathTracker::dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 9a98e3f3dd5..4f4967b10c3 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -193,8 +193,8 @@ StatusWith<repl::OpTime> NamespaceString::getDropPendingNamespaceOpTime() const
long long term;
status = mongo::NumberParser{}(opTimeStr.substr(termSeparatorIndex + 1), &term);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Invalid term in drop-pending namespace: "
- << _ns);
+ return status.withContext(str::stream()
+ << "Invalid term in drop-pending namespace: " << _ns);
}
return repl::OpTime(Timestamp(Seconds(seconds), increment), term);
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index ca72ef06a95..e44c135e7d2 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -64,8 +64,8 @@
#include "mongo/util/fail_point_service.h"
namespace mongo {
-using repl::OplogEntry;
using repl::MutableOplogEntry;
+using repl::OplogEntry;
namespace {
@@ -436,11 +436,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (!collElem || args.nss.ns() == collElem.String()) {
uasserted(40654,
str::stream() << "failCollectionUpdates failpoint enabled, namespace: "
- << args.nss.ns()
- << ", update: "
- << args.updateArgs.update
- << " on document with "
- << args.updateArgs.criteria);
+ << args.nss.ns() << ", update: " << args.updateArgs.update
+ << " on document with " << args.updateArgs.criteria);
}
}
@@ -988,7 +985,6 @@ void logCommitOrAbortForPreparedTransaction(OperationContext* opCtx,
writeConflictRetry(
opCtx, "onPreparedTransactionCommitOrAbort", NamespaceString::kRsOplogNamespace.ns(), [&] {
-
// Writes to the oplog only require a Global intent lock. Guaranteed by
// OplogSlotReserver.
invariant(opCtx->lockState()->isWriteLocked());
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index bd333d78ecc..310d246cd31 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -120,12 +120,10 @@ TEST_F(OpObserverTest, StartIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -162,12 +160,10 @@ TEST_F(OpObserverTest, CommitIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -204,12 +200,10 @@ TEST_F(OpObserverTest, AbortIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -289,8 +283,7 @@ TEST_F(OpObserverTest, CollModWithCollectionOptionsAndTTLInfo) {
BSON("collectionOptions_old"
<< BSON("validationLevel" << oldCollOpts.validationLevel << "validationAction"
<< oldCollOpts.validationAction)
- << "expireAfterSeconds_old"
- << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
+ << "expireAfterSeconds_old" << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
ASSERT_BSONOBJ_EQ(o2Expected, o2);
}
@@ -392,10 +385,9 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) {
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp
- << "dropTarget"
- << dropTargetUuid);
+ auto oExpected =
+ BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp"
+ << stayTemp << "dropTarget" << dropTargetUuid);
ASSERT_BSONOBJ_EQ(oExpected, o);
// Ensure that the rename optime returned is the same as the last optime in the ReplClientInfo.
@@ -424,8 +416,8 @@ TEST_F(OpObserverTest, OnRenameCollectionOmitsDropTargetFieldIfDropTargetUuidIsN
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp);
+ auto oExpected = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "stayTemp" << stayTemp);
ASSERT_BSONOBJ_EQ(oExpected, o);
}
@@ -734,45 +726,28 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "prepare"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "d"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
ASSERT_EQ(oplogEntry.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -837,16 +812,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedCommitTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -905,16 +875,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedAbortTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -1159,42 +1124,27 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2 << "data"
- << "z"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3 << "data"
- << "w"))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 2 << "data"
+ << "z"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 3 << "data"
+ << "w"))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(!oplogEntry.shouldPrepare());
ASSERT_FALSE(oplogEntryObj.hasField("prepare"));
@@ -1236,28 +1186,19 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) {
auto oplogEntry = getSingleOplogEntry(opCtx());
checkCommonFields(oplogEntry);
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1292,20 +1233,12 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTest) {
auto o = oplogEntry.getObjectField("o");
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1350,12 +1283,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionSingleStatementTest) {
// The implicit commit oplog entry.
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << BSON("_id" << 0))));
+ << "ns" << nss.toString() << "ui" << uuid
+ << "o" << BSON("_id" << 0))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject());
}
@@ -1394,52 +1323,32 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
}
@@ -1490,36 +1399,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1563,28 +1462,18 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
    oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1634,52 +1523,30 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertPrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "prepare" << true << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1742,36 +1609,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdatePrepareTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1831,28 +1686,16 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeletePrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -2060,36 +1903,20 @@ TEST_F(OpObserverMultiEntryTransactionTest, UnpreparedTransactionPackingTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
@@ -2133,38 +1960,21 @@ TEST_F(OpObserverMultiEntryTransactionTest, PreparedTransactionPackingTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
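
The expected-oplog-entry blocks above are built entirely with the BSON() and BSON_ARRAY() macros, which take alternating field-name and value tokens joined by operator<<. A compact sketch of the pattern (assumes the mongo/bson headers; the include paths and function name are illustrative):

    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobj.h"

    namespace mongo {

    BSONObj makeExpectedApplyOps() {
        // Nested BSON() / BSON_ARRAY() calls produce subdocuments and arrays,
        // mirroring one applyOps entry from the tests above.
        return BSON("applyOps" << BSON_ARRAY(BSON("op"
                                                  << "i"
                                                  << "ns"
                                                  << "test.coll"
                                                  << "o" << BSON("_id" << 0)))
                               << "prepare" << true);
    }

    }  // namespace mongo
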
diff --git a/src/mongo/db/op_observer_util.h b/src/mongo/db/op_observer_util.h
index e3a7d195e7a..7e60c66cca8 100644
--- a/src/mongo/db/op_observer_util.h
+++ b/src/mongo/db/op_observer_util.h
@@ -42,4 +42,4 @@ BSONObj makeCreateCollCmdObj(const NamespaceString& collectionName,
BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
const CollectionOptions& oldCollOptions,
boost::optional<TTLCollModInfo> ttlInfo);
-}
+} // namespace mongo
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 9c2b6d74774..27832209b69 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -42,7 +42,7 @@ struct OperationTimeTrackerHolder {
const OperationContext::Decoration<OperationTimeTrackerHolder> OperationTimeTrackerHolder::get =
OperationContext::declareDecoration<OperationTimeTrackerHolder>();
-}
+} // namespace
std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext* opCtx) {
auto timeTrackerHolder = OperationTimeTrackerHolder::get(opCtx);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 99ebccf0378..b26f583d460 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -50,4 +50,4 @@ long long deleteObjects(OperationContext* opCtx,
bool justOne,
bool god = false,
bool fromMigrate = false);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index d48b4c104db..d0b9debab28 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -58,9 +58,9 @@ Status validateDepth(const BSONObj& obj) {
// We're exactly at the limit, so descending to the next level would exceed
// the maximum depth.
return {ErrorCodes::Overflow,
- str::stream() << "cannot insert document because it exceeds "
- << BSONDepth::getMaxDepthForUserStorage()
- << " levels of nesting"};
+ str::stream()
+ << "cannot insert document because it exceeds "
+ << BSONDepth::getMaxDepthForUserStorage() << " levels of nesting"};
}
frames.emplace_back(elem.embeddedObject());
}
@@ -78,10 +78,8 @@ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj&
if (doc.objsize() > BSONObjMaxUserSize)
return StatusWith<BSONObj>(ErrorCodes::BadValue,
str::stream() << "object to insert too large"
- << ". size in bytes: "
- << doc.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << doc.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
auto depthStatus = validateDepth(doc);
if (!depthStatus.isOK()) {
@@ -266,4 +264,4 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h
index ebbf9738460..8bdcbadc281 100644
--- a/src/mongo/db/ops/insert.h
+++ b/src/mongo/db/ops/insert.h
@@ -58,4 +58,4 @@ Status userAllowedWriteNS(const NamespaceString& ns);
* operations. If not, returns an error Status.
*/
Status userAllowedCreateNS(StringData db, StringData coll);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index a506bb88c0c..a600f37a543 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -82,8 +82,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while creating collection "
- << nsString
- << " during upsert"));
+ << nsString << " during upsert"));
}
WriteUnitOfWork wuow(opCtx);
collection = db->createCollection(opCtx, nsString, CollectionOptions());
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index f8f6adc1fe5..dcb4f13b950 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -363,8 +363,9 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
"hangDuringBatchInsert",
[&wholeOp]() {
log() << "batch insert - hangDuringBatchInsert fail point enabled for namespace "
- << wholeOp.getNamespace() << ". Blocking "
- "until fail point is disabled.";
+ << wholeOp.getNamespace()
+ << ". Blocking "
+ "until fail point is disabled.";
},
true, // Check for interrupt periodically.
wholeOp.getNamespace());
@@ -502,7 +503,6 @@ WriteResult performInserts(OperationContext* opCtx,
durationCount<Microseconds>(curOp.elapsedTimeExcludingPauses()),
curOp.isCommand(),
curOp.getReadWriteType());
-
});
{
@@ -859,7 +859,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
"until fail point is disabled.";
},
true // Check for interrupt periodically.
- );
+ );
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index e63dbb500d9..935139adfda 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -39,11 +39,11 @@
namespace mongo {
+using write_ops::Delete;
+using write_ops::DeleteOpEntry;
using write_ops::Insert;
using write_ops::Update;
-using write_ops::Delete;
using write_ops::UpdateOpEntry;
-using write_ops::DeleteOpEntry;
namespace {
@@ -51,10 +51,7 @@ template <class T>
void checkOpCountForCommand(const T& op, size_t numOps) {
uassert(ErrorCodes::InvalidLength,
str::stream() << "Write batch sizes must be between 1 and "
- << write_ops::kMaxWriteBatchSize
- << ". Got "
- << numOps
- << " operations.",
+ << write_ops::kMaxWriteBatchSize << ". Got " << numOps << " operations.",
numOps != 0 && numOps <= write_ops::kMaxWriteBatchSize);
const auto& stmtIds = op.getWriteCommandBase().getStmtIds();
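
checkOpCountForCommand() above pairs uassert() with a str::stream message; because uassert is a macro, the message expression is only evaluated when the condition fails. A rough standalone approximation, with UASSERT_LIKE and the helper as illustrative stand-ins rather than the real mongo/util/assert_util.h machinery:

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    // Stand-in for mongo's uassert(): the message expression is not evaluated
    // unless the condition is false.
    #define UASSERT_LIKE(cond, msgExpr)            \
        do {                                       \
            if (!(cond))                           \
                throw std::runtime_error(msgExpr); \
        } while (0)

    void checkBatchSize(std::size_t numOps, std::size_t maxOps) {
        UASSERT_LIKE(numOps != 0 && numOps <= maxOps,
                     "Write batch sizes must be between 1 and " + std::to_string(maxOps) +
                         ". Got " + std::to_string(numOps) + " operations.");
    }
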
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index e9499ecde08..b5074350ef4 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -44,9 +44,7 @@ TEST(CommandWriteOpsParsers, CommonFields_BypassDocumentValidation) {
for (BSONElement bypassDocumentValidation : BSON_ARRAY(true << false << 1 << 0 << 1.0 << 0.0)) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "bypassDocumentValidation"
+ << "documents" << BSON_ARRAY(BSONObj()) << "bypassDocumentValidation"
<< bypassDocumentValidation);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
@@ -61,10 +59,7 @@ TEST(CommandWriteOpsParsers, CommonFields_Ordered) {
for (bool ordered : {true, false}) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "ordered"
- << ordered);
+ << "documents" << BSON_ARRAY(BSONObj()) << "ordered" << ordered);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
auto op = InsertOp::parse(request);
@@ -77,14 +72,8 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
// These flags are ignored, so there is nothing to check other than that this doesn't throw.
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "maxTimeMS"
- << 1000
- << "shardVersion"
- << BSONObj()
- << "writeConcern"
- << BSONObj());
+ << "documents" << BSON_ARRAY(BSONObj()) << "maxTimeMS" << 1000 << "shardVersion"
+ << BSONObj() << "writeConcern" << BSONObj());
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
InsertOp::parse(request);
@@ -94,10 +83,7 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "GARBAGE"
- << BSON_ARRAY(BSONObj()));
+ << "documents" << BSON_ARRAY(BSONObj()) << "GARBAGE" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -105,12 +91,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
}
TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonField) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "documents"
- << BSON_ARRAY(BSONObj()));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj()) << "documents" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -121,9 +105,7 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
OpMsgRequest request;
request.body = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "$db"
+ << "documents" << BSON_ARRAY(BSONObj()) << "$db"
<< "foo");
request.sequences = {{"documents",
{
@@ -134,12 +116,10 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
}
TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj() << BSONObj()) << "stmtIds" << BSON_ARRAY(12));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(InsertOp::parse(request), AssertionException, ErrorCodes::InvalidLength);
@@ -149,12 +129,8 @@ TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
TEST(CommandWriteOpsParsers, ErrorOnStmtIdSpecifiedTwoWays) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12)
- << "stmtId"
- << 13);
+ << "documents" << BSON_ARRAY(BSONObj()) << "stmtIds" << BSON_ARRAY(12)
+ << "stmtId" << 13);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
@@ -174,10 +150,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsInUpdateDoc) {
}
TEST(CommandWriteOpsParsers, GarbageFieldsInDeleteDoc) {
- auto cmd = BSON("delete"
- << "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
+ auto cmd =
+ BSON("delete"
+ << "bar"
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(DeleteOp::parse(request), AssertionException);
@@ -324,12 +300,7 @@ TEST(CommandWriteOpsParsers, Update) {
for (bool multi : {false, true}) {
auto rawUpdate =
BSON("q" << query << "u" << update << "arrayFilters" << BSON_ARRAY(arrayFilter)
- << "multi"
- << multi
- << "upsert"
- << upsert
- << "collation"
- << collation);
+ << "multi" << multi << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -365,10 +336,8 @@ TEST(CommandWriteOpsParsers, UpdateWithPipeline) {
<< "en_US");
for (bool upsert : {false, true}) {
for (bool multi : {false, true}) {
- auto rawUpdate = BSON(
- "q" << query["q"] << "u" << update["u"] << "multi" << multi << "upsert" << upsert
- << "collation"
- << collation);
+ auto rawUpdate = BSON("q" << query["q"] << "u" << update["u"] << "multi" << multi
+ << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -423,8 +392,7 @@ TEST(CommandWriteOpsParsers, RemoveErrorsWithBadLimit) {
for (BSONElement limit : BSON_ARRAY(-1 << 2 << 0.5)) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index 32a160d433c..866385c73fe 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -56,11 +56,8 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40606,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isRemove());
uassert(40607,
str::stream() << "No pre-image available for findAndModify retry request:"
@@ -71,22 +68,16 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40608,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isUpsert());
} else {
uassert(
40609,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
opType == repl::OpTypeEnum::kUpdate);
if (request.shouldReturnNew()) {
@@ -94,18 +85,14 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document after update returned, but only before "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPostImageOpTime());
} else {
uassert(40612,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document before update returned, but only after "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPreImageOpTime());
}
}
@@ -129,8 +116,7 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
uassert(40613,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toString()
- << " cannot be found",
+ << opTime.toString() << " cannot be found",
!oplogDoc.isEmpty());
auto oplogEntry = uassertStatusOK(repl::OplogEntry::parse(oplogDoc));
@@ -172,8 +158,7 @@ repl::OplogEntry getInnerNestedOplogEntry(const repl::OplogEntry& entry) {
uassert(40635,
str::stream() << "expected nested oplog entry with ts: "
<< entry.getTimestamp().toString()
- << " to have o2 field: "
- << redact(entry.toBSON()),
+ << " to have o2 field: " << redact(entry.toBSON()),
entry.getObject2());
return uassertStatusOK(repl::OplogEntry::parse(*entry.getObject2()));
}
@@ -200,10 +185,8 @@ SingleWriteResult parseOplogEntryForUpdate(const repl::OplogEntry& entry) {
str::stream() << "update retry request is not compatible with previous write in "
"the transaction of type: "
<< OpType_serializer(entry.getOpType())
- << ", oplogTs: "
- << entry.getTimestamp().toString()
- << ", oplog: "
- << redact(entry.toBSON()));
+ << ", oplogTs: " << entry.getTimestamp().toString()
+ << ", oplog: " << redact(entry.toBSON()));
}
return res;
diff --git a/src/mongo/db/ops/write_ops_retryability_test.cpp b/src/mongo/db/ops/write_ops_retryability_test.cpp
index 05c4828dae1..550744fa95c 100644
--- a/src/mongo/db/ops/write_ops_retryability_test.cpp
+++ b/src/mongo/db/ops/write_ops_retryability_test.cpp
@@ -78,15 +78,12 @@ repl::OplogEntry makeOplogEntry(repl::OpTime opTime,
}
TEST_F(WriteOpsRetryability, ParseOplogEntryForUpdate) {
- const auto entry =
- assertGet(repl::OplogEntry::parse(BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
- << "u"
- << "ns"
- << "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5)
- << "o2"
- << BSON("_id" << 1))));
+ const auto entry = assertGet(repl::OplogEntry::parse(
+ BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
+ << "u"
+ << "ns"
+ << "a.b"
+ << "o" << BSON("_id" << 1 << "x" << 5) << "o2" << BSON("_id" << 1))));
auto res = parseOplogEntryForUpdate(entry);
@@ -120,8 +117,7 @@ TEST_F(WriteOpsRetryability, ParseOplogEntryForUpsert) {
<< "i"
<< "ns"
<< "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5))));
+ << "o" << BSON("_id" << 1 << "x" << 5))));
auto res = parseOplogEntryForUpdate(entry);
@@ -187,8 +183,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
@@ -197,8 +192,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
<< "value"
<< BSON("_id"
<< "ID value"
- << "x"
- << 1)),
+ << "x" << 1)),
result);
}
@@ -212,15 +206,13 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnOld) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted"
<< "ID value")
- << "value"
- << BSONNULL),
+ << "value" << BSONNULL),
result);
}
@@ -242,8 +234,7 @@ TEST_F(FindAndModifyRetryability, NestedUpsert) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted" << 1)
- << "value"
- << BSON("_id" << 1)),
+ << "value" << BSON("_id" << 1)),
result);
}
@@ -353,8 +344,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -386,8 +376,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -413,8 +402,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
@@ -446,8 +434,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
index 0f55d053fb3..1e4dbc1c303 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
@@ -108,15 +108,15 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
- TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver([anchor = _anchor](
- const Argument& secs) {
- try {
- anchor->setPeriod(getPeriod(secs));
- } catch (const DBException& ex) {
- log() << "Failed to update period of thread which aborts expired transactions "
- << ex.toStatus();
- }
- });
+ TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver(
+ [anchor = _anchor](const Argument& secs) {
+ try {
+ anchor->setPeriod(getPeriod(secs));
+ } catch (const DBException& ex) {
+ log() << "Failed to update period of thread which aborts expired transactions "
+ << ex.toStatus();
+ }
+ });
}
} // namespace mongo
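The reflowed addObserver call above is the recurring "callback on server-parameter change" shape: a multi-line lambda argument now forces the call onto its own line with the lambda indented as a block. A self-contained sketch of that shape (Observable and its members are illustrative stand-ins, not mongo's API):

    #include <functional>
    #include <iostream>
    #include <vector>

    struct Observable {
        using Argument = long long;
        std::vector<std::function<void(const Argument&)>> observers;
        void addObserver(std::function<void(const Argument&)> f) {
            observers.push_back(std::move(f));
        }
        void set(Argument v) {
            // Notify every registered observer of the new value.
            for (auto& f : observers)
                f(v);
        }
    };

    int main() {
        Observable transactionLifetimeLimitSeconds;
        // New style: the call breaks after '(' and the lambda indents one step,
        // instead of hanging the lambda off the end of the first line.
        transactionLifetimeLimitSeconds.addObserver(
            [](const Observable::Argument& secs) {
                std::cout << "new period: " << secs << "s\n";
            });
        transactionLifetimeLimitSeconds.set(60);
    }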
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
index d2b7f36ff93..2d92de68fef 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
@@ -93,7 +93,7 @@ void PeriodicThreadToDecreaseSnapshotHistoryCachePressure::_init(ServiceContext*
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
SnapshotWindowParams::observeCheckCachePressurePeriodSeconds.addObserver([anchor = _anchor](
- const auto& secs) {
+ const auto& secs) {
try {
anchor->setPeriod(Seconds(secs));
} catch (const DBException& ex) {
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index 1a2986cb374..c382429e6ea 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -351,4 +351,4 @@ public:
private:
MutableDocument _output;
};
-}
+} // namespace mongo
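This `-}` / `+} // namespace mongo` hunk repeats across the accumulator files below: the style now requires a closing comment on every namespace brace. The convention, in a standalone sketch:

    #include <iostream>

    namespace mongo {
    namespace pipeline_demo {  // an illustrative inner namespace, not a real one
    void hello() {
        std::cout << "accumulators live here\n";
    }
    }  // namespace pipeline_demo
    }  // namespace mongo

    int main() {
        mongo::pipeline_demo::hello();
    }

The comment makes the matching namespace visible at the closing brace, which matters in files where the brace is hundreds of lines from the opening declaration.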
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index 38946678389..43550e9e361 100644
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -134,4 +134,4 @@ void AccumulatorAvg::reset() {
_decimalTotal = {};
_count = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index 3e452f1d0e6..6fcc334af83 100644
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -74,4 +74,4 @@ intrusive_ptr<Accumulator> AccumulatorFirst::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorFirst(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 3c667d16d53..4774abca5e9 100644
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -68,4 +68,4 @@ intrusive_ptr<Accumulator> AccumulatorLast::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorLast(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_merge_objects.cpp b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
index 4f8ef357f35..8878ff97676 100644
--- a/src/mongo/db/pipeline/accumulator_merge_objects.cpp
+++ b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
@@ -71,8 +71,7 @@ void AccumulatorMergeObjects::processInternal(const Value& input, bool merging)
uassert(40400,
str::stream() << "$mergeObjects requires object inputs, but input " << input.toString()
- << " is of type "
- << typeName(input.getType()),
+ << " is of type " << typeName(input.getType()),
(input.getType() == BSONType::Object));
FieldIterator iter = input.getDocument().fieldIterator();
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index d81403eac85..496d9d94220 100644
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -89,4 +89,4 @@ intrusive_ptr<Accumulator> AccumulatorMax::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorMax(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index becb6828635..5c1f640cef8 100644
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -86,4 +86,4 @@ intrusive_ptr<Accumulator> AccumulatorPush::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorPush(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_std_dev.cpp b/src/mongo/db/pipeline/accumulator_std_dev.cpp
index a10da2a41c0..a2bce628539 100644
--- a/src/mongo/db/pipeline/accumulator_std_dev.cpp
+++ b/src/mongo/db/pipeline/accumulator_std_dev.cpp
@@ -118,4 +118,4 @@ void AccumulatorStdDev::reset() {
_mean = 0;
_m2 = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 54a17b0b980..04c806bb992 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -130,8 +130,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << repl::ReadConcernArgs::kReadConcernFieldName
- << " must be an object, not a "
- << typeName(elem.type())};
+ << " must be an object, not a " << typeName(elem.type())};
}
request.setReadConcern(elem.embeddedObject().getOwned());
} else if (kHintName == fieldName) {
@@ -205,8 +204,8 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
} else if (WriteConcernOptions::kWriteConcernField == fieldName) {
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be an object, not a "
- << typeName(elem.type())};
+ str::stream()
+ << fieldName << " must be an object, not a " << typeName(elem.type())};
}
WriteConcernOptions writeConcern;
@@ -246,23 +245,20 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (!hasCursorElem && !hasExplainElem) {
return {ErrorCodes::FailedToParse,
str::stream()
- << "The '"
- << kCursorName
+ << "The '" << kCursorName
<< "' option is required, except for aggregate with the explain argument"};
}
if (request.getExplain() && cmdObj[WriteConcernOptions::kWriteConcernField]) {
return {ErrorCodes::FailedToParse,
str::stream() << "Aggregation explain does not support the'"
- << WriteConcernOptions::kWriteConcernField
- << "' option"};
+ << WriteConcernOptions::kWriteConcernField << "' option"};
}
if (hasNeedsMergeElem && !hasFromMongosElem) {
return {ErrorCodes::FailedToParse,
str::stream() << "Cannot specify '" << kNeedsMergeName << "' without '"
- << kFromMongosName
- << "'"};
+ << kFromMongosName << "'"};
}
return request;
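The parseFromBSON hunks all reflow `{ErrorCodes::..., str::stream() << ...}` returns. A standalone analogue of that error path; Status and ErrorCodes here are simplified stand-ins for mongo's Status/StatusWith machinery:

    #include <iostream>
    #include <sstream>
    #include <string>

    enum class ErrorCodes { OK, TypeMismatch, FailedToParse };

    struct Status {
        ErrorCodes code;
        std::string reason;
    };

    // Returns a non-OK Status with a streamed message when the field has the
    // wrong type, mirroring the brace-initialized returns in the hunks above.
    Status checkIsObject(const std::string& fieldName, const std::string& actualType) {
        if (actualType != "object") {
            std::ostringstream ss;
            ss << fieldName << " must be an object, not a " << actualType;
            return {ErrorCodes::TypeMismatch, ss.str()};
        }
        return {ErrorCodes::OK, ""};
    }

    int main() {
        auto s = checkIsObject("writeConcern", "string");
        std::cout << s.reason << '\n';
    }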
diff --git a/src/mongo/db/pipeline/dependencies.cpp b/src/mongo/db/pipeline/dependencies.cpp
index 6bfdc19bdce..1586a68f96b 100644
--- a/src/mongo/db/pipeline/dependencies.cpp
+++ b/src/mongo/db/pipeline/dependencies.cpp
@@ -282,4 +282,4 @@ Document documentHelper(const BSONObj& bson, const Document& neededFields, int n
Document ParsedDeps::extractFields(const BSONObj& input) const {
return documentHelper(input, _fields, _nFields);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies.h b/src/mongo/db/pipeline/dependencies.h
index b7e31a6237b..3487584a4a0 100644
--- a/src/mongo/db/pipeline/dependencies.h
+++ b/src/mongo/db/pipeline/dependencies.h
@@ -205,4 +205,4 @@ private:
Document _fields;
int _nFields; // Cache the number of top-level fields needed.
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies_test.cpp b/src/mongo/db/pipeline/dependencies_test.cpp
index 2fdf25c799b..6d2741a78e4 100644
--- a/src/mongo/db/pipeline/dependencies_test.cpp
+++ b/src/mongo/db/pipeline/dependencies_test.cpp
@@ -147,8 +147,7 @@ TEST(DependenciesToProjectionTest, ShouldAttemptToExcludeOtherFieldsIfOnlyTextSc
deps.setNeedsMetadata(DepsTracker::MetadataType::TEXT_SCORE, true);
ASSERT_BSONOBJ_EQ(deps.toProjection(),
BSON(Document::metaFieldTextScore << metaTextScore << "_id" << 0
- << "$noFieldsNeeded"
- << 1));
+ << "$noFieldsNeeded" << 1));
}
TEST(DependenciesToProjectionTest,
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index 9557d01a414..30ffa5a662d 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -406,8 +406,7 @@ BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& d
void Document::toBson(BSONObjBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
@@ -646,4 +645,4 @@ Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeS
return doc.freeze();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index d89389dbabe..4a67439dee0 100644
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -717,4 +717,4 @@ inline MutableValue MutableValue::getField(Position pos) {
inline MutableValue MutableValue::getField(StringData key) {
return MutableDocument(*this).getField(key);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index f5dc33e0ae4..c87ab70f43e 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -529,4 +529,4 @@ private:
friend class DocumentStorageIterator;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_add_fields.cpp b/src/mongo/db/pipeline/document_source_add_fields.cpp
index 8784a5ebf16..dd7550a0c1c 100644
--- a/src/mongo/db/pipeline/document_source_add_fields.cpp
+++ b/src/mongo/db/pipeline/document_source_add_fields.cpp
@@ -83,4 +83,4 @@ intrusive_ptr<DocumentSource> DocumentSourceAddFields::createFromBson(
return DocumentSourceAddFields::create(elem.Obj(), expCtx, specifiedName);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_bucket.cpp b/src/mongo/db/pipeline/document_source_bucket.cpp
index e7efd9b202e..3245d21b742 100644
--- a/src/mongo/db/pipeline/document_source_bucket.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket.cpp
@@ -37,8 +37,8 @@
namespace mongo {
using boost::intrusive_ptr;
-using std::vector;
using std::list;
+using std::vector;
REGISTER_MULTI_STAGE_ALIAS(bucket,
LiteParsedDocumentSourceDefault::parse,
@@ -58,8 +58,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
uassert(40201,
str::stream() << "Argument to $bucket stage must be an object, but found type: "
- << typeName(elem.type())
- << ".",
+ << typeName(elem.type()) << ".",
elem.type() == BSONType::Object);
const BSONObj bucketObj = elem.embeddedObject();
@@ -86,15 +85,13 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40202,
str::stream() << "The $bucket 'groupBy' field must be defined as a $-prefixed "
"path or an expression, but found: "
- << groupByField.toString(false, false)
- << ".",
+ << groupByField.toString(false, false) << ".",
groupByIsExpressionInObject || groupByIsPrefixedPath);
} else if ("boundaries" == argName) {
uassert(
40200,
str::stream() << "The $bucket 'boundaries' field must be an array, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Array);
for (auto&& boundaryElem : argument.embeddedObject()) {
@@ -102,8 +99,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40191,
str::stream() << "The $bucket 'boundaries' field must be an array of "
"constant values, but found value: "
- << boundaryElem.toString(false, false)
- << ".",
+ << boundaryElem.toString(false, false) << ".",
exprConst);
boundaryValues.push_back(exprConst->getValue());
}
@@ -111,8 +107,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40192,
str::stream()
<< "The $bucket 'boundaries' field must have at least 2 values, but found "
- << boundaryValues.size()
- << " value(s).",
+ << boundaryValues.size() << " value(s).",
boundaryValues.size() >= 2);
// Make sure that the boundaries are unique, sorted in ascending order, and have the
@@ -126,22 +121,14 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40193,
str::stream() << "All values in the the 'boundaries' option to $bucket "
"must have the same type. Found conflicting types "
- << typeName(lower.getType())
- << " and "
- << typeName(upper.getType())
- << ".",
+ << typeName(lower.getType()) << " and "
+ << typeName(upper.getType()) << ".",
lowerCanonicalType == upperCanonicalType);
uassert(40194,
str::stream()
<< "The 'boundaries' option to $bucket must be sorted, but elements "
- << i - 1
- << " and "
- << i
- << " are not in ascending order ("
- << lower.toString()
- << " is not less than "
- << upper.toString()
- << ").",
+ << i - 1 << " and " << i << " are not in ascending order ("
+ << lower.toString() << " is not less than " << upper.toString() << ").",
pExpCtx->getValueComparator().evaluate(lower < upper));
}
} else if ("default" == argName) {
@@ -151,8 +138,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40195,
str::stream()
<< "The $bucket 'default' field must be a constant expression, but found: "
- << argument.toString(false, false)
- << ".",
+ << argument.toString(false, false) << ".",
exprConst);
defaultValue = exprConst->getValue();
@@ -162,8 +148,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(
40196,
str::stream() << "The $bucket 'output' field must be an object, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Object);
for (auto&& outputElem : argument.embeddedObject()) {
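The $bucket hunks reflow uassert messages carrying numeric location codes. A standalone analogue of that assertion style, where uassertDemo is a stand-in for mongo's uassert (which throws an AssertionException rather than std::runtime_error):

    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Throw when the predicate fails, carrying the location code and message.
    void uassertDemo(int code, const std::string& msg, bool expr) {
        if (!expr)
            throw std::runtime_error("Location" + std::to_string(code) + ": " + msg);
    }

    int main() {
        try {
            std::ostringstream ss;
            ss << "The $bucket 'boundaries' field must have at least 2 values, but found "
               << 1 << " value(s).";
            uassertDemo(40192, ss.str(), false);
        } catch (const std::exception& ex) {
            std::cout << ex.what() << '\n';
        }
    }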
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
index ecb3bf08007..f5a0600ae1b 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
@@ -51,10 +51,10 @@
namespace mongo {
namespace {
+using boost::intrusive_ptr;
using std::deque;
-using std::vector;
using std::string;
-using boost::intrusive_ptr;
+using std::vector;
class BucketAutoTests : public AggregationContextFixture {
public:
diff --git a/src/mongo/db/pipeline/document_source_change_stream.cpp b/src/mongo/db/pipeline/document_source_change_stream.cpp
index 5e5861f2971..9050b9990dd 100644
--- a/src/mongo/db/pipeline/document_source_change_stream.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream.cpp
@@ -147,9 +147,7 @@ void DocumentSourceChangeStream::checkValueType(const Value v,
BSONType expectedType) {
uassert(40532,
str::stream() << "Entry field \"" << filedName << "\" should be "
- << typeName(expectedType)
- << ", found: "
- << typeName(v.getType()),
+ << typeName(expectedType) << ", found: " << typeName(v.getType()),
(v.getType() == expectedType));
}
@@ -402,11 +400,12 @@ list<intrusive_ptr<DocumentSource>> buildPipeline(const intrusive_ptr<Expression
// There might not be a starting point if we're on mongos, otherwise we should either have a
// 'resumeAfter' starting point, or should start from the latest majority committed operation.
auto replCoord = repl::ReplicationCoordinator::get(expCtx->opCtx);
- uassert(40573,
- "The $changeStream stage is only supported on replica sets",
- expCtx->inMongos || (replCoord &&
- replCoord->getReplicationMode() ==
- repl::ReplicationCoordinator::Mode::modeReplSet));
+ uassert(
+ 40573,
+ "The $changeStream stage is only supported on replica sets",
+ expCtx->inMongos ||
+ (replCoord &&
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet));
if (!startFrom && !expCtx->inMongos) {
startFrom = replCoord->getMyLastAppliedOpTime().getTimestamp();
}
@@ -464,8 +463,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceChangeStream::createFromBson(
str::stream() << "unrecognized value for the 'fullDocument' option to the "
"$changeStream stage. Expected \"default\" or "
"\"updateLookup\", got \""
- << fullDocOption
- << "\"",
+ << fullDocOption << "\"",
fullDocOption == "updateLookup"_sd || fullDocOption == "default"_sd);
const bool shouldLookupPostImage = (fullDocOption == "updateLookup"_sd);
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index 65a2ed55821..acc9d82e1f8 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -62,8 +62,8 @@ namespace mongo {
namespace {
using boost::intrusive_ptr;
-using repl::OpTypeEnum;
using repl::OplogEntry;
+using repl::OpTypeEnum;
using std::list;
using std::string;
using std::vector;
@@ -423,8 +423,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter
BSON(DSChangeStream::kStageName
<< BSON("resumeAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -467,8 +466,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO
BSON(DSChangeStream::kStageName
<< BSON("startAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -629,7 +627,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -655,7 +654,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFieldsLegacyNoId) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"x", 1}, {"y", 1}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -679,7 +679,8 @@ TEST_F(ChangeStreamStageTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
@@ -1374,7 +1375,8 @@ TEST_F(ChangeStreamStageTest, ClusterTimeMatchesOplogEntry) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -1659,9 +1661,9 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
ResumeTokenData::FromInvalidate::kFromInvalidate);
ASSERT_THROWS_CODE(DSChangeStream::createFromBson(
- BSON(DSChangeStream::kStageName << BSON(
- "resumeAfter" << resumeTokenInvalidate << "startAtOperationTime"
- << kDefaultTs))
+ BSON(DSChangeStream::kStageName
+ << BSON("resumeAfter" << resumeTokenInvalidate
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -1826,7 +1828,8 @@ TEST_F(ChangeStreamStageDBTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
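The updateDescription documents asserted in these tests pair an 'updatedFields' document with a 'removedFields' array. A standalone sketch of how removed fields can be derived from pre- and post-image documents (illustrative only, not the server's implementation):

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    int main() {
        std::map<std::string, int> pre{{"x", 2}, {"y", 1}};
        std::map<std::string, int> post{{"x", 2}};
        // A field present before the update but absent after it was removed.
        std::set<std::string> removedFields;
        for (const auto& [field, value] : pre)
            if (!post.count(field))
                removedFields.insert(field);
        for (const auto& field : removedFields)
            std::cout << "removed: " << field << '\n';  // prints "removed: y"
    }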
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index a02a6018231..bbf031b9f08 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -62,35 +62,28 @@ intrusive_ptr<DocumentSource> DocumentSourceCollStats::createFromBson(
if ("latencyStats" == fieldName) {
uassert(40167,
str::stream() << "latencyStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
if (!elem["histograms"].eoo()) {
uassert(40305,
str::stream() << "histograms option to latencyStats must be bool, got "
- << elem
- << "of type "
- << typeName(elem.type()),
+ << elem << "of type " << typeName(elem.type()),
elem["histograms"].isBoolean());
}
} else if ("storageStats" == fieldName) {
uassert(40279,
str::stream() << "storageStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else if ("count" == fieldName) {
uassert(40480,
str::stream() << "count argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else if ("queryExecStats" == fieldName) {
uassert(31141,
str::stream() << "queryExecStats argument must be an empty object, but got "
- << elem
- << " of type "
- << typeName(elem.type()),
+ << elem << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
uassert(31170,
str::stream() << "queryExecStats argument must be an empty object, but got "
@@ -155,8 +148,8 @@ DocumentSource::GetNextResult DocumentSourceCollStats::getNext() {
pExpCtx->opCtx, pExpCtx->ns, &builder);
if (!status.isOK()) {
uasserted(40481,
- str::stream() << "Unable to retrieve count in $collStats stage: "
- << status.reason());
+ str::stream()
+ << "Unable to retrieve count in $collStats stage: " << status.reason());
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.cpp b/src/mongo/db/pipeline/document_source_current_op.cpp
index ba9c11d9e98..72225d8185f 100644
--- a/src/mongo/db/pipeline/document_source_current_op.cpp
+++ b/src/mongo/db/pipeline/document_source_current_op.cpp
@@ -155,9 +155,7 @@ DocumentSource::GetNextResult DocumentSourceCurrentOp::getNext() {
if (fieldName == kOpIdFieldName) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "expected numeric opid for $currentOp response from '"
- << _shardName
- << "' but got: "
- << typeName(elt.type()),
+ << _shardName << "' but got: " << typeName(elt.type()),
elt.isNumber());
std::string shardOpID = (str::stream() << _shardName << ":" << elt.numberInt());
@@ -258,8 +256,8 @@ intrusive_ptr<DocumentSource> DocumentSourceCurrentOp::createFromBson(
: BacktraceMode::kExcludeBacktrace);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "Unrecognized option '" << fieldName
- << "' in $currentOp stage.");
+ str::stream()
+ << "Unrecognized option '" << fieldName << "' in $currentOp stage.");
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.h b/src/mongo/db/pipeline/document_source_current_op.h
index 537f4662fee..0e86973a009 100644
--- a/src/mongo/db/pipeline/document_source_current_op.h
+++ b/src/mongo/db/pipeline/document_source_current_op.h
@@ -83,8 +83,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 9c2d124f34d..22d263741c2 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -337,4 +337,4 @@ intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
new DocumentSourceCursor(collection, std::move(exec), pExpCtx, trackOplogTimestamp));
return source;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 96efa899d29..b45938c45d0 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -124,9 +124,7 @@ Exchange::Exchange(ExchangeSpec spec, std::unique_ptr<Pipeline, PipelineDeleter>
uassert(50951,
str::stream() << "Specified exchange buffer size (" << _maxBufferSize
- << ") exceeds the maximum allowable amount ("
- << kMaxBufferSize
- << ").",
+ << ") exceeds the maximum allowable amount (" << kMaxBufferSize << ").",
_maxBufferSize <= kMaxBufferSize);
for (int idx = 0; idx < _spec.getConsumers(); ++idx) {
@@ -205,8 +203,7 @@ std::vector<size_t> Exchange::extractConsumerIds(
uassert(50950,
str::stream() << "Specified number of exchange consumers (" << nConsumers
- << ") exceeds the maximum allowable amount ("
- << kMaxNumberConsumers
+ << ") exceeds the maximum allowable amount (" << kMaxNumberConsumers
<< ").",
nConsumers <= kMaxNumberConsumers);
@@ -415,8 +412,9 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
if (elem.type() == BSONType::String && elem.str() == "hashed") {
- kb << "" << BSONElementHasher::hash64(BSON("" << value).firstElement(),
- BSONElementHasher::DEFAULT_HASH_SEED);
+ kb << ""
+ << BSONElementHasher::hash64(BSON("" << value).firstElement(),
+ BSONElementHasher::DEFAULT_HASH_SEED);
} else {
kb << "" << value;
}
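The getTargetConsumer hunk above reformats a hashed-routing call. A standalone analogue of the routing idea, with std::hash standing in for BSONElementHasher::hash64 and a simple modulo standing in for the exchange's range lookup:

    #include <functional>
    #include <iostream>
    #include <string>

    int main() {
        const std::size_t nConsumers = 4;
        std::string shardKeyValue = "user-42";
        // Hash the key value, then map the hash onto a consumer slot.
        std::size_t target = std::hash<std::string>{}(shardKeyValue) % nConsumers;
        std::cout << "document routed to consumer " << target << '\n';
    }

Hashing before routing spreads skewed key values evenly across consumers, which is the point of the "hashed" key spec checked in the surrounding code.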
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index cd66171a246..ef4f626e7b6 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -556,7 +556,6 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
ThreadInfo* threadInfo = &threads[id];
auto handle = _executor->scheduleWork(
[threadInfo, &processedDocs](const executor::TaskExecutor::CallbackArgs& cb) {
-
DocumentSourceExchange* exchange = threadInfo->documentSourceExchange.get();
const auto getNext = [exchange, threadInfo]() {
// Will acquire 'artificalGlobalMutex'. Within getNext() it will be released and
@@ -652,8 +651,7 @@ TEST_F(DocumentSourceExchangeTest, RangeRandomHashExchangeNConsumer) {
TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 0);
+ << "consumers" << 0);
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -663,10 +661,7 @@ TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 2));
+ << "consumers" << 1 << "key" << BSON("a" << 2));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -676,9 +671,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
+ << "consumers" << 1 << "key"
<< BSON("a"
<< "nothash"));
ASSERT_THROWS_CODE(
@@ -690,10 +683,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << true));
+ << "consumers" << 1 << "key" << BSON("a" << true));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -703,10 +693,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("" << 1));
+ << "consumers" << 1 << "key" << BSON("" << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -716,13 +703,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -733,13 +715,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -750,13 +727,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -767,13 +739,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 2
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 2 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0 << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -784,13 +751,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
BSONObj spec = BSON("policy"
<< "roundrobin"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -801,13 +763,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -818,11 +775,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidMissingKeys) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
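The rejected specs in these tests all violate keyRange invariants: boundaries must start at MinKey, end at MaxKey, ascend strictly, and carry one consumer id per range. An illustrative simplification of that validation, with integer boundaries standing in for BSON key ranges:

    #include <iostream>
    #include <vector>

    bool validSpec(const std::vector<int>& boundaries, std::size_t nConsumerIds) {
        if (boundaries.size() < 2)
            return false;
        for (std::size_t i = 1; i < boundaries.size(); ++i)
            if (boundaries[i - 1] >= boundaries[i])
                return false;                          // must ascend strictly
        return nConsumerIds == boundaries.size() - 1;  // one id per range
    }

    int main() {
        std::cout << std::boolalpha
                  << validSpec({0, 10, 20}, 2) << ' '  // true
                  << validSpec({20, 0}, 1) << '\n';    // false: not ascending
    }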
diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp
index 95214ab1560..16d14e1edf4 100644
--- a/src/mongo/db/pipeline/document_source_facet.cpp
+++ b/src/mongo/db/pipeline/document_source_facet.cpp
@@ -93,11 +93,8 @@ vector<pair<string, vector<BSONObj>>> extractRawPipelines(const BSONElement& ele
for (auto&& subPipeElem : facetElem.Obj()) {
uassert(40171,
str::stream() << "elements of arrays in $facet spec must be non-empty objects, "
- << facetName
- << " argument contained an element of type "
- << typeName(subPipeElem.type())
- << ": "
- << subPipeElem,
+ << facetName << " argument contained an element of type "
+ << typeName(subPipeElem.type()) << ": " << subPipeElem,
subPipeElem.type() == BSONType::Object);
rawPipeline.push_back(subPipeElem.embeddedObject());
}
@@ -350,8 +347,7 @@ intrusive_ptr<DocumentSource> DocumentSourceFacet::createFromBson(
}
uassert(ErrorCodes::IllegalOperation,
str::stream() << "$facet pipeline '" << *needsMongoS
- << "' must run on mongoS, but '"
- << *needsShard
+ << "' must run on mongoS, but '" << *needsShard
<< "' requires a shard",
!(needsShard && needsMongoS));
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index 471f93f04f0..c96ac4b4114 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -211,8 +211,7 @@ void DocumentSourceGraphLookUp::doBreadthFirstSearch() {
while (auto next = pipeline->getNext()) {
uassert(40271,
str::stream()
- << "Documents in the '"
- << _from.ns()
+ << "Documents in the '" << _from.ns()
<< "' namespace must contain an _id for de-duplication in $graphLookup",
!(*next)["_id"].missing());
@@ -392,10 +391,8 @@ void DocumentSourceGraphLookUp::serializeToArray(
std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
- << _connectToField.fullPath()
- << "connectFromField"
- << _connectFromField.fullPath()
- << "startWith"
+ << _connectToField.fullPath() << "connectFromField"
+ << _connectFromField.fullPath() << "startWith"
<< _startWith->serialize(false)));
// depthField is optional; serialize it if it was specified.
@@ -414,10 +411,10 @@ void DocumentSourceGraphLookUp::serializeToArray(
// If we are explaining, include an absorbed $unwind inside the $graphLookup specification.
if (_unwind && explain) {
const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
- spec["unwinding"] = Value(DOC("preserveNullAndEmptyArrays"
- << (*_unwind)->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
- << (indexPath ? Value((*indexPath).fullPath()) : Value())));
+ spec["unwinding"] =
+ Value(DOC("preserveNullAndEmptyArrays"
+ << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << (indexPath ? Value((*indexPath).fullPath()) : Value())));
}
array.push_back(Value(DOC(getSourceName() << spec.freeze())));
@@ -550,8 +547,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
argName == "depthField" || argName == "connectToField") {
// All remaining arguments to $graphLookup are expected to be strings.
uassert(40103,
- str::stream() << "expected string as argument for " << argName << ", found: "
- << argument.toString(false, false),
+ str::stream() << "expected string as argument for " << argName
+ << ", found: " << argument.toString(false, false),
argument.type() == String);
}
@@ -567,8 +564,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
depthField = boost::optional<FieldPath>(FieldPath(argument.String()));
} else {
uasserted(40104,
- str::stream() << "Unknown argument to $graphLookup: "
- << argument.fieldName());
+ str::stream()
+ << "Unknown argument to $graphLookup: " << argument.fieldName());
}
}
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
index 0e402da49a1..27b364ca2cd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
@@ -247,10 +247,8 @@ TEST_F(DocumentSourceGraphLookUpTest,
ASSERT(next.isEOF());
} else {
FAIL(str::stream() << "Expected either [ " << to0from1.toString() << " ] or [ "
- << to0from2.toString()
- << " ] but found [ "
- << next.getDocument().toString()
- << " ]");
+ << to0from2.toString() << " ] but found [ "
+ << next.getDocument().toString() << " ]");
}
}
diff --git a/src/mongo/db/pipeline/document_source_group_test.cpp b/src/mongo/db/pipeline/document_source_group_test.cpp
index 998255cfee8..42411f52660 100644
--- a/src/mongo/db/pipeline/document_source_group_test.cpp
+++ b/src/mongo/db/pipeline/document_source_group_test.cpp
@@ -215,10 +215,10 @@ TEST_F(DocumentSourceGroupTest, ShouldReportMultipleFieldGroupKeysAsARename) {
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
auto doc = std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>>>{{"x", x},
{"y", y}};
- for (auto & [ unused, expression ] : doc)
+ for (auto& [unused, expression] : doc)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : doc) {
+ for (auto& [fieldName, unused] : doc) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -523,8 +523,9 @@ class AggregateObjectExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first" << BSON("x"
- << "$a")));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first" << BSON("x"
+ << "$a")));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << BSON("x" << 6));
@@ -537,8 +538,9 @@ class AggregateOperatorExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first"
- << "$a"));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first"
+ << "$a"));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << 6);
@@ -635,8 +637,9 @@ class SingleDocument : public CheckResultsBase {
return {DOC("a" << 1)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$sum"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$sum"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:1}]";
@@ -649,8 +652,9 @@ class TwoValuesSingleKey : public CheckResultsBase {
return {DOC("a" << 1), DOC("a" << 2)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$push"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1,2]}]";
@@ -708,8 +712,7 @@ class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
<< "list"
<< BSON("$push"
<< "$a")
- << "sum"
- << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
+ << "sum" << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
}
virtual string expectedResultSetString() {
return "[{_id:0,list:[1,3],sum:2},{_id:1,list:[2,4],sum:3}]";
@@ -770,8 +773,9 @@ class UndefinedAccumulatorValue : public CheckResultsBase {
return {Document()};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "first" << BSON("$first"
- << "$missing"));
+ return BSON("_id" << 0 << "first"
+ << BSON("$first"
+ << "$missing"));
}
virtual string expectedResultSetString() {
return "[{_id:0, first:null}]";
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index 23343699114..c24671624f6 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -85,4 +85,4 @@ Value DocumentSourceIndexStats::serialize(
boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
index b4dd8a61adf..13a0c173424 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
@@ -65,4 +65,4 @@ Value DocumentSourceInternalInhibitOptimization::serialize(
return Value(Document{{getSourceName(), Value{Document{}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
index 86b919fb848..75f3e637a7d 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
@@ -73,4 +73,4 @@ private:
Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
index 3b7eb1f86a8..0eb5a85f0d0 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
@@ -69,14 +69,12 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceInternalSplitPipeline::create
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing mergeType: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing $_internalSplitPipeline: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
}
@@ -120,4 +118,4 @@ Value DocumentSourceInternalSplitPipeline::serialize(
mergeTypeString.empty() ? Value() : Value(mergeTypeString)}}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
index 9d58b7e3fd5..d2d4b14e685 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
@@ -85,4 +85,4 @@ private:
HostTypeRequirement _mergeType = HostTypeRequirement::kNone;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
index 3c189798db9..9195d0aa0aa 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
@@ -51,8 +51,7 @@ DocumentSource::GetNextResult DocumentSourceListCachedAndActiveUsers::getNext()
const auto info = std::move(_users.back());
_users.pop_back();
return Document(BSON("username" << info.userName.getUser() << "db" << info.userName.getDB()
- << "active"
- << info.active));
+ << "active" << info.active));
}
return GetNextResult::makeEOF();
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
index 3a9efde6261..51dea36c162 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
@@ -73,8 +73,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
};
diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.h b/src/mongo/db/pipeline/document_source_list_local_sessions.h
index 853ea2034e5..74403ac9d45 100644
--- a/src/mongo/db/pipeline/document_source_list_local_sessions.h
+++ b/src/mongo/db/pipeline/document_source_list_local_sessions.h
@@ -84,8 +84,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index b2a81168a77..f14b3db0394 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -261,8 +261,7 @@ DocumentSource::GetNextResult DocumentSourceLookUp::getNext() {
objsize += result->getApproximateSize();
uassert(4568,
str::stream() << "Total size of documents in " << _fromNs.coll()
- << " matching pipeline's $lookup stage exceeds "
- << maxBytes
+ << " matching pipeline's $lookup stage exceeds " << maxBytes
<< " bytes",
objsize <= maxBytes);
@@ -687,8 +686,7 @@ void DocumentSourceLookUp::serializeToArray(
const boost::optional<FieldPath> indexPath = _unwindSrc->indexPath();
output[getSourceName()]["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << _unwindSrc->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
+ << _unwindSrc->preserveNullAndEmptyArrays() << "includeArrayIndex"
<< (indexPath ? Value(indexPath->fullPath()) : Value())));
}
@@ -810,8 +808,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
if (argName == "let") {
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argument
- << "' must be an object, is type "
- << argument.type(),
+ << "' must be an object, is type " << argument.type(),
argument.type() == BSONType::Object);
letVariables = argument.Obj();
hasLet = true;
@@ -820,9 +817,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argName << "' must be a string, found "
- << argument
- << ": "
- << argument.type(),
+ << argument << ": " << argument.type(),
argument.type() == BSONType::String);
if (argName == "from") {
diff --git a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
index 327fdf6f703..cb24b7b9ae8 100644
--- a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
@@ -43,14 +43,9 @@ Value assertFieldHasType(const Document& fullDoc, StringData fieldName, BSONType
auto val = fullDoc[fieldName];
uassert(40578,
str::stream() << "failed to look up post image after change: expected \"" << fieldName
- << "\" field to have type "
- << typeName(expectedType)
- << ", instead found type "
- << typeName(val.getType())
- << ": "
- << val.toString()
- << ", full object: "
- << fullDoc.toString(),
+ << "\" field to have type " << typeName(expectedType)
+ << ", instead found type " << typeName(val.getType()) << ": "
+ << val.toString() << ", full object: " << fullDoc.toString(),
val.getType() == expectedType);
return val;
}
@@ -88,8 +83,7 @@ NamespaceString DocumentSourceLookupChangePostImage::assertValidNamespace(
// lookup into any namespace.
uassert(40579,
str::stream() << "unexpected namespace during post image lookup: " << nss.ns()
- << ", expected "
- << pExpCtx->ns.ns(),
+ << ", expected " << pExpCtx->ns.ns(),
nss == pExpCtx->ns ||
(pExpCtx->isClusterAggregation() || pExpCtx->isDBAggregation(nss.db())));
@@ -112,8 +106,7 @@ Value DocumentSourceLookupChangePostImage::lookupPostImage(const Document& updat
const auto readConcern = pExpCtx->inMongos
? boost::optional<BSONObj>(BSON("level"
<< "majority"
- << "afterClusterTime"
- << resumeToken.getData().clusterTime))
+ << "afterClusterTime" << resumeToken.getData().clusterTime))
: boost::none;
diff --git a/src/mongo/db/pipeline/document_source_lookup_test.cpp b/src/mongo/db/pipeline/document_source_lookup_test.cpp
index 24d640ea85f..38b17213eaf 100644
--- a/src/mongo/db/pipeline/document_source_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_test.cpp
@@ -95,9 +95,7 @@ TEST_F(DocumentSourceLookUpTest, PreservesParentPipelineLetVariables) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -117,9 +115,7 @@ TEST_F(DocumentSourceLookUpTest, AcceptsPipelineSyntax) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -229,17 +225,17 @@ TEST_F(DocumentSourceLookUpTest, RejectLookupWhenDepthLimitIsExceeded) {
expCtx->subPipelineDepth = DocumentSourceLookUp::kMaxSubPipelineDepth;
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- ErrorCodes::MaxSubPipelineDepthExceeded);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ ErrorCodes::MaxSubPipelineDepthExceeded);
}
TEST_F(ReplDocumentSourceLookUpTest, RejectsPipelineWithChangeStreamStage) {
@@ -286,8 +282,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
auto lookupStage = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
<< "localField"
<< "a"
<< "foreignField"
@@ -298,8 +293,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
expCtx);
FAIL(str::stream()
- << "Expected creation of the "
- << lookupStage->getSourceName()
+ << "Expected creation of the " << lookupStage->getSourceName()
<< " stage to uassert on mix of localField/foreignField and pipeline options");
} catch (const AssertionException& ex) {
ASSERT_EQ(ErrorCodes::FailedToParse, ex.code());
@@ -335,50 +329,50 @@ TEST_F(DocumentSourceLookUpTest, RejectsInvalidLetVariableName) {
expCtx->setResolvedNamespaces(StringMap<ExpressionContext::ResolvedNamespace>{
{fromNs.coll().toString(), {fromNs, std::vector<BSONObj>()}}});
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("" // Empty variable name.
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16866);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("^invalidFirstChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16867);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("contains.invalidChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16868);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("" // Empty variable name.
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16866);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("^invalidFirstChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16867);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("contains.invalidChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16868);
}
TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
@@ -393,9 +387,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
<< "let"
<< BSON("local_x"
<< "$x")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -729,8 +721,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheNonCorrelatedSubPipelinePrefix) {
auto expectedPipe = fromjson(
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -914,8 +905,7 @@ TEST_F(DocumentSourceLookUpTest,
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
"{$lookup: {from: 'coll', as: 'subas', let: {var1: '$y'}, "
"pipeline: [{$match: {$expr: { $eq: ['$z', '$$var1']}}}]}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -947,8 +937,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheEntirePipelineIfNonCorrelated) {
<< "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, {$lookup: {from: "
"'coll', as: 'subas', let: {}, pipeline: [{$match: {y: 5}}]}}, {$addFields: "
"{constField: {$const: 5}}}, "
- << sequentialCacheStageObj()
- << "]");
+ << sequentialCacheStageObj() << "]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index d29fe8285aa..6ed37705edd 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -49,8 +49,8 @@ namespace mongo {
using boost::intrusive_ptr;
using std::pair;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
REGISTER_DOCUMENT_SOURCE(match,
diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp
index ec09b41fd5c..c4f7e864231 100644
--- a/src/mongo/db/pipeline/document_source_merge.cpp
+++ b/src/mongo/db/pipeline/document_source_merge.cpp
@@ -83,7 +83,7 @@ constexpr auto kPipelineDiscardMode = MergeMode{WhenMatched::kPipeline, WhenNotM
*/
MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -104,7 +104,7 @@ MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
*/
MergeStrategy makeStrictUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -408,7 +408,7 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::createFromBson(
mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->mode : kDefaultWhenMatched;
auto whenNotMatched = mergeSpec.getWhenNotMatched().value_or(kDefaultWhenNotMatched);
auto pipeline = mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->pipeline : boost::none;
- auto[mergeOnFields, targetCollectionVersion] =
+ auto [mergeOnFields, targetCollectionVersion] =
expCtx->mongoProcessInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, mergeSpec.getOn(), mergeSpec.getTargetCollectionVersion(), targetNss);
@@ -431,7 +431,7 @@ Value DocumentSourceMerge::serialize(boost::optional<ExplainOptions::Verbosity>
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->serialize(static_cast<bool>(explain));
}
return bob.obj();
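
The merge hunks also pick up the newer clang-format treatment of C++17 structured bindings: one space between `auto`/`auto&&` and the binding list, and no padding inside the brackets. A self-contained sketch of both spellings (standard library only, nothing MongoDB-specific):

    #include <map>
    #include <string>
    #include <utility>

    int bindingStyleSketch() {
        // New output: 'auto [a, b]' rather than 'auto[a, b]'.
        auto [mergeOn, version] = std::pair{1, 2};

        // New output: 'auto&& [name, value]' rather than 'auto && [ name, value ]'.
        std::map<std::string, int> letVariables{{"v1", 10}};
        int sum = mergeOn + version;
        for (auto&& [name, value] : letVariables) {
            (void)name;
            sum += value;
        }
        return sum;
    }
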
diff --git a/src/mongo/db/pipeline/document_source_merge.h b/src/mongo/db/pipeline/document_source_merge.h
index 927c0376245..f7889528930 100644
--- a/src/mongo/db/pipeline/document_source_merge.h
+++ b/src/mongo/db/pipeline/document_source_merge.h
@@ -180,7 +180,7 @@ private:
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->evaluate(doc, &pExpCtx->variables);
}
return bob.obj();
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
index b344097425d..7daca0df406 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
@@ -133,8 +133,8 @@ TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectEmptyArray) {
TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectLegacySerializationFormats) {
// Formats like this were used in old versions of the server but are no longer supported.
- auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON(
- "ns" << kTestNss.ns() << "id" << 0LL << "host" << kTestHost.toString())));
+ auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON("ns" << kTestNss.ns() << "id" << 0LL
+ << "host" << kTestHost.toString())));
ASSERT_THROWS_CODE(DocumentSourceMergeCursors::createFromBson(spec.firstElement(), getExpCtx()),
AssertionException,
17026);
diff --git a/src/mongo/db/pipeline/document_source_merge_test.cpp b/src/mongo/db/pipeline/document_source_merge_test.cpp
index 32ed7b9f963..cdef17e6ca9 100644
--- a/src/mongo/db/pipeline/document_source_merge_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_test.cpp
@@ -140,8 +140,7 @@ TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfWhenMatchedIsStringOrArray) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSONArray()));
+ << "whenMatched" << BSONArray()));
ASSERT(createMergeStage(spec));
}
@@ -214,14 +213,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfIntoIsNotAValidUserCollection) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << true)));
+ << "db" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << BSONArray())));
+ << "db" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
@@ -235,14 +232,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfCollIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << true)));
+ << "coll" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << BSONArray())));
+ << "coll" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
@@ -264,40 +259,34 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotAValidDatabaseName) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenMatchedModeIsNotStringOrArray) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << true));
+ << "whenMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << 100));
+ << "whenMatched" << 100));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON("" << kDefaultWhenMatchedMode)));
+ << "whenMatched" << BSON("" << kDefaultWhenMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
}
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsNotString) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << true));
+ << "whenNotMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSONArray()));
+ << "whenNotMatched" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSON("" << kDefaultWhenNotMatchedMode)));
+ << "whenNotMatched" << BSON("" << kDefaultWhenNotMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
}
@@ -341,26 +330,22 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsUnsupportedStr
TEST_F(DocumentSourceMergeTest, FailsToParseIfOnFieldIsNotStringOrArrayOfStrings) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << 1));
+ << "on" << 1));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSONArray()));
+ << "on" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51187);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
+ << "on" << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51134);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON("_id" << 1)));
+ << "on" << BSON("_id" << 1)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
}
@@ -616,24 +601,21 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "insert"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "fail"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "discard"));
ASSERT(createMergeStage(spec));
@@ -658,41 +640,33 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
auto let = BSON("foo"
<< "bar");
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "insert"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "fail"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "fail"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "discard"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "discard"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "insert"));
@@ -700,9 +674,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "fail"));
@@ -710,9 +682,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "discard"));
@@ -720,9 +690,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "insert"));
@@ -730,9 +698,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "fail"));
@@ -740,9 +706,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "discard"));
@@ -750,9 +714,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "keepExisting"
<< "whenNotMatched"
<< "insert"));
@@ -760,9 +722,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "fail"
<< "whenNotMatched"
<< "insert"));
@@ -770,12 +730,12 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
}
TEST_F(DocumentSourceMergeTest, SerializeDefaultLetVariable) {
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
auto serialized = mergeStage->serialize().getDocument();
ASSERT_VALUE_EQ(serialized["$merge"]["let"],
@@ -796,11 +756,10 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
<< BSON("v1" << 10 << "v2"
<< "foo"
<< "v3"
- << BSON("x" << 1 << "y" << BSON("z"
- << "bar")))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << BSON("x" << 1 << "y"
+ << BSON("z"
+ << "bar")))
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -810,8 +769,9 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
Value(BSON("$const"
<< "foo")));
ASSERT_VALUE_EQ(serialized["$merge"]["let"]["v3"],
- Value(BSON("x" << BSON("$const" << 1) << "y" << BSON("z" << BSON("$const"
- << "bar")))));
+ Value(BSON("x" << BSON("$const" << 1) << "y"
+ << BSON("z" << BSON("$const"
+ << "bar")))));
ASSERT_VALUE_EQ(serialized["$merge"]["whenMatched"], Value(pipeline));
}
@@ -823,9 +783,7 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
<< "target_collection"
<< "let"
<< BSON("v1" << BSON_ARRAY(1 << "2" << BSON("x" << 1 << "y" << 2)))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -847,14 +805,11 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONNULL
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONNULL << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -867,14 +822,11 @@ TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariables) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONObj()
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONObj() << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -887,11 +839,7 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "1")));
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << 1
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "let" << 1 << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
@@ -899,18 +847,13 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "target_collection"
<< "let"
<< "foo"
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << BSON_ARRAY(1 << "2")
- << "whenMatched"
- << pipeline
+ << "let" << BSON_ARRAY(1 << "2") << "whenMatched" << pipeline
<< "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
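
Every case in the merge_test hunks follows the same shape: assemble a candidate $merge spec with the builder macros, then assert that stage creation throws an AssertionException carrying a specific code. Condensed from the hunks above:

    auto spec = BSON("$merge" << BSON("into"
                                      << "target_collection"
                                      << "whenMatched" << true));
    ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);

The numeric codes (51191, 51186, and so on) are the uassert identifiers raised by the $merge parser, so each test pins down exactly which validation rejected the spec.
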
diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp
index f4efb3e731e..86e9ebda0ee 100644
--- a/src/mongo/db/pipeline/document_source_mock.cpp
+++ b/src/mongo/db/pipeline/document_source_mock.cpp
@@ -76,4 +76,4 @@ intrusive_ptr<DocumentSourceMock> DocumentSourceMock::createForTest(
}
return new DocumentSourceMock(std::move(results));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 318dfb47859..2293294534c 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -106,8 +106,8 @@ void DocumentSourceOut::initialize() {
DBClientBase* conn = pExpCtx->mongoProcessInterface->directClient();
const auto& outputNs = getOutputNs();
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Save the original collection options and index specs so we can check they didn't change
// during computation.
@@ -123,8 +123,8 @@ void DocumentSourceOut::initialize() {
// We will write all results into a temporary collection, then rename the temporary
// collection to be the target collection once we are done.
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Create temp collection, copying options from the existing output collection if any.
{
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
index d95043e66fe..dfa460c3f9f 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
@@ -41,14 +41,14 @@ REGISTER_DOCUMENT_SOURCE(planCacheStats,
boost::intrusive_ptr<DocumentSource> DocumentSourcePlanCacheStats::createFromBson(
BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& pExpCtx) {
- uassert(
- ErrorCodes::FailedToParse,
- str::stream() << kStageName << " value must be an object. Found: " << typeName(spec.type()),
- spec.type() == BSONType::Object);
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << kStageName
+ << " value must be an object. Found: " << typeName(spec.type()),
+ spec.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
- str::stream() << kStageName << " parameters object must be empty. Found: "
- << typeName(spec.type()),
+ str::stream() << kStageName
+ << " parameters object must be empty. Found: " << typeName(spec.type()),
spec.embeddedObject().isEmpty());
uassert(50932,
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
index 3eec42538f2..6980b400972 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
@@ -159,8 +159,7 @@ TEST_F(DocumentSourcePlanCacheStatsTest, ReturnsOnlyMatchingStatsAfterAbsorbingM
<< "baz"),
BSON("foo"
<< "bar"
- << "match"
- << true)};
+ << "match" << true)};
getExpCtx()->mongoProcessInterface =
std::make_shared<PlanCacheStatsMongoProcessInterface>(stats);
diff --git a/src/mongo/db/pipeline/document_source_queue.cpp b/src/mongo/db/pipeline/document_source_queue.cpp
index 80559de1a71..47a77709363 100644
--- a/src/mongo/db/pipeline/document_source_queue.cpp
+++ b/src/mongo/db/pipeline/document_source_queue.cpp
@@ -55,4 +55,4 @@ DocumentSource::GetNextResult DocumentSourceQueue::getNext() {
_queue.pop_front();
return next;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 3ff60410a95..7afc1eea75a 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -161,8 +161,7 @@ boost::optional<Document> DocumentSourceRedact::redactObject(const Document& roo
uasserted(17053,
str::stream() << "$redact's expression should not return anything "
<< "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned "
- << expressionResult.toString());
+ << "$$PRUNE, but returned " << expressionResult.toString());
}
}
@@ -196,4 +195,4 @@ intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
return source;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_replace_root.cpp b/src/mongo/db/pipeline/document_source_replace_root.cpp
index e494fe1ea2a..3fe49d83f0b 100644
--- a/src/mongo/db/pipeline/document_source_replace_root.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root.cpp
@@ -63,11 +63,9 @@ Document ReplaceRootTransformation::applyTransformation(const Document& input) {
uassert(40228,
str::stream() << msgOpener.toString()
<< "must evaluate to an object, but resulting value was: "
- << newRoot.toString()
- << ". Type of resulting value: '"
+ << newRoot.toString() << ". Type of resulting value: '"
<< typeName(newRoot.getType())
- << "'. Input document: "
- << input.toString(),
+ << "'. Input document: " << input.toString(),
newRoot.getType() == BSONType::Object);
// Turn the value into a document.
@@ -97,8 +95,7 @@ intrusive_ptr<DocumentSource> DocumentSourceReplaceRoot::createFromBson(
<< stageName);
uassert(40229,
str::stream() << "expected an object as specification for " << kStageName
- << " stage, got "
- << typeName(elem.type()),
+ << " stage, got " << typeName(elem.type()),
elem.type() == Object);
auto spec =
diff --git a/src/mongo/db/pipeline/document_source_replace_root_test.cpp b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
index 71c356e98f2..cb71448fa7b 100644
--- a/src/mongo/db/pipeline/document_source_replace_root_test.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
@@ -336,14 +336,12 @@ TEST_F(ReplaceRootSpec, CreationRequiresObjectSpecification) {
TEST_F(ReplaceRootSpec, OnlyValidOptionInObjectSpecIsNewRoot) {
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "root"
- << 2))),
+ << "root" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "path"
- << 2))),
+ << "path" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("path"
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index a3a33ca6f4c..cddae4ad571 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -116,9 +116,7 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
<< _idField
<< " field in order to de-duplicate results, but encountered a "
"document without a "
- << _idField
- << " field: "
- << nextInput.getDocument().toString(),
+ << _idField << " field: " << nextInput.getDocument().toString(),
!idField.missing());
if (_seenDocs.insert(std::move(idField)).second) {
@@ -163,4 +161,4 @@ intrusive_ptr<DocumentSourceSampleFromRandomCursor> DocumentSourceSampleFromRand
new DocumentSourceSampleFromRandomCursor(expCtx, size, idField, nDocsInCollection));
return source;
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
index 7fb2933fada..e6d1f3bd7f8 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
@@ -153,12 +153,12 @@ Value DocumentSourceSequentialDocumentCache::serialize(
{kStageName,
Document{{"maxSizeBytes"_sd, Value(static_cast<long long>(_cache->maxSizeBytes()))},
{"status"_sd,
- _cache->isBuilding() ? "kBuilding"_sd : _cache->isServing()
- ? "kServing"_sd
- : "kAbandoned"_sd}}}});
+ _cache->isBuilding()
+ ? "kBuilding"_sd
+ : _cache->isServing() ? "kServing"_sd : "kAbandoned"_sd}}}});
}
return Value();
}
-} // namesace mongo
+} // namespace mongo
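
The serialize() hunk above only re-indents a chained conditional; the logic is a three-way mapping from cache state to a status string. Equivalently, as a sketch (hypothetical free function over the two predicates visible in the hunk; StringData and the _sd literal are the types used there):

    template <typename Cache>
    StringData cacheStatusName(const Cache& cache) {
        return cache.isBuilding() ? "kBuilding"_sd
                                  : cache.isServing() ? "kServing"_sd
                                                      : "kAbandoned"_sd;
    }
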
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.h b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
index c575772e2c9..d8d2ba90db8 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.h
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
@@ -103,4 +103,4 @@ private:
bool _hasOptimizedPos = false;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 2eead90aa3f..143a796cdf6 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -116,4 +116,4 @@ intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
return DocumentSourceSkip::create(pExpCtx, nToSkip);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 576541c207b..870394a277c 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -286,4 +286,4 @@ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
string pathString(Expression::removeFieldPrefix(prefixedPathString));
return DocumentSourceUnwind::create(pExpCtx, pathString, preserveNullAndEmptyArrays, indexPath);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind_test.cpp b/src/mongo/db/pipeline/document_source_unwind_test.cpp
index 9eed7c7446b..e55a58a1c3b 100644
--- a/src/mongo/db/pipeline/document_source_unwind_test.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind_test.cpp
@@ -163,8 +163,7 @@ private:
void createUnwind(bool preserveNullAndEmptyArrays, bool includeArrayIndex) {
auto specObj =
DOC("$unwind" << DOC("path" << unwindFieldPath() << "preserveNullAndEmptyArrays"
- << preserveNullAndEmptyArrays
- << "includeArrayIndex"
+ << preserveNullAndEmptyArrays << "includeArrayIndex"
<< (includeArrayIndex ? Value(indexPath()) : Value())));
_unwind = static_cast<DocumentSourceUnwind*>(
DocumentSourceUnwind::createFromBson(specObj.toBson().firstElement(), ctx()).get());
@@ -474,8 +473,9 @@ class SeveralMoreDocuments : public CheckResultsBase {
deque<DocumentSource::GetNextResult> inputData() override {
return {DOC("_id" << 0 << "a" << BSONNULL),
DOC("_id" << 1),
- DOC("_id" << 2 << "a" << DOC_ARRAY("a"_sd
- << "b"_sd)),
+ DOC("_id" << 2 << "a"
+ << DOC_ARRAY("a"_sd
+ << "b"_sd)),
DOC("_id" << 3),
DOC("_id" << 4 << "a" << DOC_ARRAY(1 << 2 << 3)),
DOC("_id" << 5 << "a" << DOC_ARRAY(4 << 5 << 6)),
@@ -763,8 +763,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonDollarPrefixedPath) {
TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << 2))),
+ << "preserveNullAndEmptyArrays" << 2))),
AssertionException,
28809);
}
@@ -772,8 +771,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
TEST_F(UnwindStageTest, ShouldRejectNonStringIncludeArrayIndex) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "includeArrayIndex"
- << 2))),
+ << "includeArrayIndex" << 2))),
AssertionException,
28810);
}
@@ -805,16 +803,13 @@ TEST_F(UnwindStageTest, ShoudlRejectDollarPrefixedIncludeArrayIndex) {
TEST_F(UnwindStageTest, ShouldRejectUnrecognizedOption) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true
- << "foo"
- << 3))),
+ << "preserveNullAndEmptyArrays" << true
+ << "foo" << 3))),
AssertionException,
28811);
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "foo"
- << 3))),
+ << "foo" << 3))),
AssertionException,
28811);
}
diff --git a/src/mongo/db/pipeline/document_source_writer.h b/src/mongo/db/pipeline/document_source_writer.h
index fd10532d469..ada2fc72a53 100644
--- a/src/mongo/db/pipeline/document_source_writer.h
+++ b/src/mongo/db/pipeline/document_source_writer.h
@@ -193,7 +193,7 @@ DocumentSource::GetNextResult DocumentSourceWriter<B>::getNext() {
waitWhileFailPointEnabled();
auto doc = nextInput.releaseDocument();
- auto[obj, objSize] = makeBatchObject(std::move(doc));
+ auto [obj, objSize] = makeBatchObject(std::move(doc));
bufferedBytes += objSize;
if (!batch.empty() &&
diff --git a/src/mongo/db/pipeline/document_value_test.cpp b/src/mongo/db/pipeline/document_value_test.cpp
index 67d669403da..f3b9e12c8de 100644
--- a/src/mongo/db/pipeline/document_value_test.cpp
+++ b/src/mongo/db/pipeline/document_value_test.cpp
@@ -597,29 +597,13 @@ TEST(MetaFields, FromBsonWithMetadataAcceptsIndexKeyMetadata) {
}
TEST(MetaFields, CopyMetadataFromCopiesAllMetadata) {
- Document source = Document::fromBsonWithMetaData(BSON(
- "a" << 1 << "$textScore" << 9.9 << "b" << 1 << "$randVal" << 42.0 << "c" << 1 << "$sortKey"
- << BSON("x" << 1)
- << "d"
- << 1
- << "$dis"
- << 3.2
- << "e"
- << 1
- << "$pt"
- << BSON_ARRAY(1 << 2)
- << "f"
- << 1
- << "$searchScore"
- << 5.4
- << "g"
- << 1
- << "$searchHighlights"
- << "foo"
- << "h"
- << 1
- << "$indexKey"
- << BSON("y" << 1)));
+ Document source = Document::fromBsonWithMetaData(
+ BSON("a" << 1 << "$textScore" << 9.9 << "b" << 1 << "$randVal" << 42.0 << "c" << 1
+ << "$sortKey" << BSON("x" << 1) << "d" << 1 << "$dis" << 3.2 << "e" << 1 << "$pt"
+ << BSON_ARRAY(1 << 2) << "f" << 1 << "$searchScore" << 5.4 << "g" << 1
+ << "$searchHighlights"
+ << "foo"
+ << "h" << 1 << "$indexKey" << BSON("y" << 1)));
MutableDocument destination{};
destination.copyMetaDataFrom(source);
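
The CopyMetadataFromCopiesAllMetadata rewrite above is formatting-only, but the test's mechanism is worth a glance: metadata rides along in the BSON under reserved $-prefixed keys ($textScore, $sortKey, $indexKey, ...), and copyMetaDataFrom transfers all of it at once. A minimal sketch using only calls visible in the hunk:

    // Parse a document whose $-prefixed fields become metadata, then copy
    // that metadata onto a fresh MutableDocument.
    Document source = Document::fromBsonWithMetaData(BSON("a" << 1 << "$textScore" << 9.9));
    MutableDocument destination{};
    destination.copyMetaDataFrom(source);
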
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 0c0ef86f963..e8b758356d9 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -112,7 +112,7 @@ struct ParserRegistration {
};
StringMap<ParserRegistration> parserMap;
-}
+} // namespace
void Expression::registerExpression(
string key,
@@ -145,17 +145,16 @@ intrusive_ptr<Expression> Expression::parseExpression(
// Make sure we are allowed to use this expression under the current feature compatibility
// version.
auto& entry = it->second;
- uassert(
- ErrorCodes::QueryFeatureNotAllowed,
- // TODO SERVER-31968 we would like to include the current version and the required minimum
- // version in this error message, but using FeatureCompatibilityVersion::toString() would
- // introduce a dependency cycle.
- str::stream() << opName
- << " is not allowed in the current feature compatibility version. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << " for more information.",
- !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
- (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
+ uassert(ErrorCodes::QueryFeatureNotAllowed,
+ // TODO SERVER-31968 we would like to include the current version and the required
+ // minimum version in this error message, but using
+ // FeatureCompatibilityVersion::toString() would introduce a dependency cycle.
+ str::stream() << opName
+ << " is not allowed in the current feature compatibility version. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink
+ << " for more information.",
+ !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
+ (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
return entry.parser(expCtx, obj.firstElement(), vps);
}
@@ -522,13 +521,11 @@ Value ExpressionArrayElemAt::evaluate(const Document& root, Variables* variables
array.isArray());
uassert(28690,
str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is "
- << typeName(indexArg.getType()),
+ << " but is " << typeName(indexArg.getType()),
indexArg.numeric());
uassert(28691,
str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: "
- << indexArg.coerceToDouble(),
+ << " a 32-bit integer: " << indexArg.coerceToDouble(),
indexArg.integral());
long long i = indexArg.coerceToLong();
@@ -808,7 +805,7 @@ static const CmpLookup cmpLookup[7] = {
// CMP is special. Only name is used.
/* CMP */ {{false, false, false}, ExpressionCompare::CMP, "$cmp"},
};
-}
+} // namespace
Value ExpressionCompare::evaluate(const Document& root, Variables* variables) const {
Value pLeft(_children[0]->evaluate(root, variables));
@@ -1063,8 +1060,8 @@ intrusive_ptr<Expression> ExpressionDateFromParts::parse(
timeZoneElem = arg;
} else {
uasserted(40518,
- str::stream() << "Unrecognized argument to $dateFromParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromParts: " << arg.fieldName());
}
}
@@ -1222,8 +1219,7 @@ bool ExpressionDateFromParts::evaluateNumberWithDefault(const Document& root,
uassert(40515,
str::stream() << "'" << fieldName << "' must evaluate to an integer, found "
- << typeName(fieldValue.getType())
- << " with value "
+ << typeName(fieldValue.getType()) << " with value "
<< fieldValue.toString(),
fieldValue.integral64Bit());
@@ -1241,17 +1237,12 @@ bool ExpressionDateFromParts::evaluateNumberWithDefaultAndBounds(const Document&
bool result =
evaluateNumberWithDefault(root, field, fieldName, defaultValue, returnValue, variables);
- uassert(31034,
- str::stream() << "'" << fieldName << "'"
- << " must evaluate to a value in the range ["
- << kMinValueForDatePart
- << ", "
- << kMaxValueForDatePart
- << "]; value "
- << *returnValue
- << " is not in range",
- !result ||
- (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
+ uassert(
+ 31034,
+ str::stream() << "'" << fieldName << "'"
+ << " must evaluate to a value in the range [" << kMinValueForDatePart << ", "
+ << kMaxValueForDatePart << "]; value " << *returnValue << " is not in range",
+ !result || (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
return result;
}
@@ -1289,9 +1280,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(40523,
str::stream() << "'year' must evaluate to an integer in the range " << 0 << " to "
- << 9999
- << ", found "
- << year,
+ << 9999 << ", found " << year,
year >= 0 && year <= 9999);
return Value(
@@ -1313,10 +1302,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(31095,
str::stream() << "'isoWeekYear' must evaluate to an integer in the range " << 0
- << " to "
- << 9999
- << ", found "
- << isoWeekYear,
+ << " to " << 9999 << ", found " << isoWeekYear,
isoWeekYear >= 0 && isoWeekYear <= 9999);
return Value(timeZone->createFromIso8601DateParts(
@@ -1393,8 +1379,8 @@ intrusive_ptr<Expression> ExpressionDateFromString::parse(
onErrorElem = arg;
} else {
uasserted(40541,
- str::stream() << "Unrecognized argument to $dateFromString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromString: " << arg.fieldName());
}
}
@@ -1476,8 +1462,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
if (!formatValue.nullish()) {
uassert(40684,
str::stream() << "$dateFromString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1498,8 +1483,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
try {
uassert(ErrorCodes::ConversionFailure,
str::stream() << "$dateFromString requires that 'dateString' be a string, found: "
- << typeName(dateString.getType())
- << " with value "
+ << typeName(dateString.getType()) << " with value "
<< dateString.toString(),
dateString.getType() == BSONType::String);
@@ -1575,8 +1559,8 @@ intrusive_ptr<Expression> ExpressionDateToParts::parse(
isoDateElem = arg;
} else {
uasserted(40520,
- str::stream() << "Unrecognized argument to $dateToParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToParts: " << arg.fieldName());
}
}
@@ -1723,8 +1707,8 @@ intrusive_ptr<Expression> ExpressionDateToString::parse(
onNullElem = arg;
} else {
uasserted(18534,
- str::stream() << "Unrecognized argument to $dateToString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToString: " << arg.fieldName());
}
}
@@ -1794,8 +1778,7 @@ Value ExpressionDateToString::evaluate(const Document& root, Variables* variable
if (!formatValue.nullish()) {
uassert(18533,
str::stream() << "$dateToString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1869,9 +1852,7 @@ Value ExpressionDivide::evaluate(const Document& root, Variables* variables) con
} else {
uasserted(16609,
str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2234,9 +2215,8 @@ intrusive_ptr<Expression> ExpressionFilter::optimize() {
}
Value ExpressionFilter::serialize(bool explain) const {
- return Value(
- DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName << "cond"
- << _filter->serialize(explain))));
+ return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
+ << "cond" << _filter->serialize(explain))));
}
Value ExpressionFilter::evaluate(const Document& root, Variables* variables) const {
@@ -2654,9 +2634,7 @@ Value ExpressionMod::evaluate(const Document& root, Variables* variables) const
} else {
uasserted(16611,
str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2776,15 +2754,12 @@ void uassertIfNotIntegralAndNonNegative(Value val,
StringData argumentName) {
uassert(40096,
str::stream() << expressionName << "requires an integral " << argumentName
- << ", found a value of type: "
- << typeName(val.getType())
- << ", with value: "
- << val.toString(),
+ << ", found a value of type: " << typeName(val.getType())
+ << ", with value: " << val.toString(),
val.integral());
uassert(40097,
str::stream() << expressionName << " requires a nonnegative " << argumentName
- << ", found: "
- << val.toString(),
+ << ", found: " << val.toString(),
val.coerceToInt() >= 0);
}
@@ -2894,8 +2869,7 @@ intrusive_ptr<Expression> ExpressionIndexOfArray::optimize() {
}
uassert(50809,
str::stream() << "First operand of $indexOfArray must be an array. First "
- << "argument is of type: "
- << typeName(valueArray.getType()),
+ << "argument is of type: " << typeName(valueArray.getType()),
valueArray.isArray());
auto arr = valueArray.getArray();
@@ -3451,7 +3425,7 @@ bool representableAsLong(long long base, long long exp) {
return base >= kBaseLimits[exp].min && base <= kBaseLimits[exp].max;
};
-}
+} // namespace
/* ----------------------- ExpressionPow ---------------------------- */
@@ -3765,7 +3739,7 @@ ValueSet arrayToSet(const Value& val, const ValueComparator& valueComparator) {
valueSet.insert(array.begin(), array.end());
return valueSet;
}
-}
+} // namespace
/* ----------------------- ExpressionSetDifference ---------------------------- */
@@ -3779,13 +3753,11 @@ Value ExpressionSetDifference::evaluate(const Document& root, Variables* variabl
uassert(17048,
str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17049,
str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
ValueSet rhsSet = arrayToSet(rhs, getExpressionContext()->getValueComparator());
@@ -3824,8 +3796,7 @@ Value ExpressionSetEquals::evaluate(const Document& root, Variables* variables)
const Value nextEntry = _children[i]->evaluate(root, variables);
uassert(17044,
str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3863,8 +3834,7 @@ Value ExpressionSetIntersection::evaluate(const Document& root, Variables* varia
}
uassert(17047,
str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3908,7 +3878,7 @@ Value setIsSubsetHelper(const vector<Value>& lhs, const ValueSet& rhs) {
}
return Value(true);
}
-}
+} // namespace
Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables) const {
const Value lhs = _children[0]->evaluate(root, variables);
@@ -3916,13 +3886,11 @@ Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables
uassert(17046,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17042,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
return setIsSubsetHelper(lhs.getArray(),
@@ -3950,8 +3918,7 @@ public:
uassert(17310,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
@@ -3973,8 +3940,7 @@ intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
const Value rhs = ec->getValue();
uassert(17311,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
intrusive_ptr<Expression> optimizedWithConstant(
@@ -4003,8 +3969,7 @@ Value ExpressionSetUnion::evaluate(const Document& root, Variables* variables) c
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: "
- << typeName(newEntries.getType()),
+ << " is of type: " << typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
@@ -4044,18 +4009,15 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28724,
str::stream() << "First argument to $slice must be an array, but is"
- << " of type: "
- << typeName(arrayVal.getType()),
+ << " of type: " << typeName(arrayVal.getType()),
arrayVal.isArray());
uassert(28725,
str::stream() << "Second argument to $slice must be a numeric value,"
- << " but is of type: "
- << typeName(arg2.getType()),
+ << " but is of type: " << typeName(arg2.getType()),
arg2.numeric());
uassert(28726,
str::stream() << "Second argument to $slice can't be represented as"
- << " a 32-bit integer: "
- << arg2.coerceToDouble(),
+ << " a 32-bit integer: " << arg2.coerceToDouble(),
arg2.integral());
const auto& array = arrayVal.getArray();
@@ -4095,13 +4057,11 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28727,
str::stream() << "Third argument to $slice must be numeric, but "
- << "is of type: "
- << typeName(countVal.getType()),
+ << "is of type: " << typeName(countVal.getType()),
countVal.numeric());
uassert(28728,
str::stream() << "Third argument to $slice can't be represented"
- << " as a 32-bit integer: "
- << countVal.coerceToDouble(),
+ << " as a 32-bit integer: " << countVal.coerceToDouble(),
countVal.integral());
uassert(28729,
str::stream() << "Third argument to $slice must be positive: "
@@ -4250,23 +4210,20 @@ Value ExpressionSubstrBytes::evaluate(const Document& root, Variables* variables
uassert(16034,
str::stream() << getOpName()
<< ": starting index must be a numeric type (is BSON type "
- << typeName(pLower.getType())
- << ")",
+ << typeName(pLower.getType()) << ")",
(pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
pLower.getType() == NumberDouble));
uassert(16035,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(pLength.getType())
- << ")",
+ << typeName(pLength.getType()) << ")",
(pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
pLength.getType() == NumberDouble));
const long long signedLower = pLower.coerceToLong();
uassert(50752,
- str::stream() << getOpName() << ": starting index must be non-negative (got: "
- << signedLower
- << ")",
+ str::stream() << getOpName()
+ << ": starting index must be non-negative (got: " << signedLower << ")",
signedLower >= 0);
const string::size_type lower = static_cast<string::size_type>(signedLower);
@@ -4314,8 +4271,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
std::string str = inputVal.coerceToString();
uassert(34450,
str::stream() << getOpName() << ": starting index must be a numeric type (is BSON type "
- << typeName(lowerVal.getType())
- << ")",
+ << typeName(lowerVal.getType()) << ")",
lowerVal.numeric());
uassert(34451,
str::stream() << getOpName()
@@ -4324,8 +4280,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
lowerVal.integral());
uassert(34452,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(lengthVal.getType())
- << ")",
+ << typeName(lengthVal.getType()) << ")",
lengthVal.numeric());
uassert(34453,
str::stream() << getOpName()
@@ -4460,8 +4415,8 @@ Value ExpressionSubtract::evaluate(const Document& root, Variables* variables) c
return Value(lhs.getDate() - Milliseconds(rhs.coerceToLong()));
} else {
uasserted(16613,
- str::stream() << "cant $subtract a " << typeName(rhs.getType())
- << " from a Date");
+ str::stream()
+ << "cant $subtract a " << typeName(rhs.getType()) << " from a Date");
}
} else {
uasserted(16556,
@@ -4587,7 +4542,7 @@ boost::intrusive_ptr<Expression> ExpressionSwitch::optimize() {
_default = _default->optimize();
}
- for (auto && [ switchCase, switchThen ] : _branches) {
+ for (auto&& [switchCase, switchThen] : _branches) {
switchCase = switchCase->optimize();
switchThen = switchThen->optimize();
}
@@ -4744,8 +4699,7 @@ std::vector<StringData> extractCodePointsFromChars(StringData utf8String,
}
uassert(50697,
str::stream()
- << "Failed to parse \"chars\" argument to "
- << expressionName
+ << "Failed to parse \"chars\" argument to " << expressionName
<< ": Detected invalid UTF-8. Missing expected continuation byte at end of string.",
i <= utf8String.size());
return codePoints;
@@ -4759,10 +4713,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50699,
str::stream() << _name << " requires its input to be a string, got "
- << unvalidatedInput.toString()
- << " (of type "
- << typeName(unvalidatedInput.getType())
- << ") instead.",
+ << unvalidatedInput.toString() << " (of type "
+ << typeName(unvalidatedInput.getType()) << ") instead.",
unvalidatedInput.getType() == BSONType::String);
const StringData input(unvalidatedInput.getStringData());
@@ -4775,10 +4727,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50700,
str::stream() << _name << " requires 'chars' to be a string, got "
- << unvalidatedUserChars.toString()
- << " (of type "
- << typeName(unvalidatedUserChars.getType())
- << ") instead.",
+ << unvalidatedUserChars.toString() << " (of type "
+ << typeName(unvalidatedUserChars.getType()) << ") instead.",
unvalidatedUserChars.getType() == BSONType::String);
return Value(
@@ -4877,11 +4827,8 @@ void assertFlagsValid(uint32_t flags,
long long precisionValue) {
uassert(51080,
str::stream() << "invalid conversion from Decimal128 result in " << opName
- << " resulting from arguments: ["
- << numericValue
- << ", "
- << precisionValue
- << "]",
+ << " resulting from arguments: [" << numericValue << ", "
+ << precisionValue << "]",
!Decimal128::hasFlag(flags, Decimal128::kInvalid));
}
@@ -4914,8 +4861,7 @@ static Value evaluateRoundOrTrunc(const Document& root,
precisionArg.integral());
uassert(51083,
str::stream() << "cannot apply " << opName << " with precision value "
- << precisionValue
- << " value must be in [-20, 100]",
+ << precisionValue << " value must be in [-20, 100]",
minPrecision <= precisionValue && precisionValue <= maxPrecision);
}
@@ -5163,8 +5109,7 @@ Value ExpressionZip::serialize(bool explain) const {
}
return Value(DOC("$zip" << DOC("inputs" << Value(serializedInput) << "defaults"
- << Value(serializedDefaults)
- << "useLongestLength"
+ << Value(serializedDefaults) << "useLongestLength"
<< serializedUseLongestLength)));
}
@@ -5199,9 +5144,10 @@ public:
//
table[BSONType::NumberDouble][BSONType::NumberDouble] = &performIdentityConversion;
table[BSONType::NumberDouble][BSONType::String] = &performFormatDouble;
- table[BSONType::NumberDouble]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDouble][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDouble][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDouble][BSONType::NumberInt] = &performCastDoubleToInt;
table[BSONType::NumberDouble][BSONType::NumberLong] = &performCastDoubleToLong;
@@ -5217,11 +5163,11 @@ public:
table[BSONType::String][BSONType::String] = &performIdentityConversion;
table[BSONType::String][BSONType::jstOID] = &parseStringToOID;
table[BSONType::String][BSONType::Bool] = &performConvertToTrue;
- table[BSONType::String][BSONType::Date] = [](
- const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
- return Value(expCtx->timeZoneDatabase->fromString(inputValue.getStringData(),
- mongo::TimeZoneDatabase::utcZone()));
- };
+ table[BSONType::String][BSONType::Date] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(expCtx->timeZoneDatabase->fromString(
+ inputValue.getStringData(), mongo::TimeZoneDatabase::utcZone()));
+ };
table[BSONType::String][BSONType::NumberInt] = &parseStringToNumber<int, 10>;
table[BSONType::String][BSONType::NumberLong] = &parseStringToNumber<long long, 10>;
table[BSONType::String][BSONType::NumberDecimal] = &parseStringToNumber<Decimal128, 0>;
@@ -5278,9 +5224,10 @@ public:
inputValue.getDate());
return Value(dateString);
};
- table[BSONType::Date]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::Date][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::Date][BSONType::Date] = &performIdentityConversion;
table[BSONType::Date][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5303,9 +5250,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getInt()));
};
- table[BSONType::NumberInt]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberInt][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberInt][BSONType::NumberInt] = &performIdentityConversion;
table[BSONType::NumberInt][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5327,9 +5275,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getLong()));
};
- table[BSONType::NumberLong]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberLong][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberLong][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberLong][BSONType::NumberInt] = &performCastLongToInt;
table[BSONType::NumberLong][BSONType::NumberLong] = &performIdentityConversion;
@@ -5346,9 +5295,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(inputValue.getDecimal().toString());
};
- table[BSONType::NumberDecimal]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDecimal][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDecimal][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDecimal][BSONType::NumberInt] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5395,8 +5345,7 @@ public:
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Unsupported conversion from " << typeName(inputType) << " to "
- << typeName(targetType)
- << " in $convert with no onError value",
+ << typeName(targetType) << " in $convert with no onError value",
foundFunction);
return foundFunction;
}
@@ -5570,8 +5519,7 @@ private:
Status parseStatus = NumberParser().base(base)(stringValue, &result);
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse number '" << stringValue
- << "' in $convert with no onError value: "
- << parseStatus.reason(),
+ << "' in $convert with no onError value: " << parseStatus.reason(),
parseStatus.isOK());
return Value(result);
@@ -5586,8 +5534,7 @@ private:
// and returned.
uasserted(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse objectId '" << inputValue.getString()
- << "' in $convert with no onError value: "
- << ex.reason());
+ << "' in $convert with no onError value: " << ex.reason());
}
}
@@ -5606,7 +5553,6 @@ Expression::Parser makeConversionAlias(const StringData shortcutName, BSONType t
return [=](const intrusive_ptr<ExpressionContext>& expCtx,
BSONElement elem,
const VariablesParseState& vps) -> intrusive_ptr<Expression> {
-
// Use parseArguments to allow for a singleton array, or the unwrapped version.
auto operands = ExpressionNary::parseArguments(expCtx, elem, vps);
@@ -5681,8 +5627,8 @@ intrusive_ptr<Expression> ExpressionConvert::parse(
onNull = parseOperand(expCtx, elem, vps);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "$convert found an unknown argument: "
- << elem.fieldNameStringData());
+ str::stream()
+ << "$convert found an unknown argument: " << elem.fieldNameStringData());
}
}
@@ -5808,8 +5754,8 @@ auto CommonRegexParse(const boost::intrusive_ptr<ExpressionContext>& expCtx,
const VariablesParseState& vpsIn,
StringData opName) {
uassert(51103,
- str::stream() << opName << " expects an object of named arguments but found: "
- << expr.type(),
+ str::stream() << opName
+ << " expects an object of named arguments but found: " << expr.type(),
expr.type() == BSONType::Object);
struct {
@@ -5881,8 +5827,7 @@ int ExpressionRegex::execute(RegexExecutionState* regexState) const {
// capacity is not sufficient to hold all the results. The latter scenario should never occur.
uassert(51156,
str::stream() << "Error occurred while executing the regular expression in " << _opName
- << ". Result code: "
- << execResult,
+ << ". Result code: " << execResult,
execResult == -1 || execResult == (regexState->numCaptures + 1));
return execResult;
}
@@ -6071,7 +6016,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFind::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFind"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFind(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6092,7 +6037,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFindAll::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFindAll"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFindAll(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6153,7 +6098,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexMatch::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexMatch"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexMatch(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
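
The hunks above only reflow the lambdas that populate $convert's conversion table: a two-dimensional table indexed by source and target BSONType, where each cell holds a callable and a missing cell means the conversion is unsupported. A minimal standalone sketch of that dispatch shape, with illustrative names rather than MongoDB's actual types:

#include <functional>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// Illustrative stand-ins for BSONType and Value.
enum class Type { String, Bool, Int };
using Value = std::string;

// Each cell converts a Value from the row type to the column type.
using ConversionFunc = std::function<Value(const Value&)>;

int main() {
    std::map<Type, std::map<Type, ConversionFunc>> table;

    // Identity conversion, in the spirit of performIdentityConversion.
    table[Type::Int][Type::Int] = [](const Value& v) { return v; };

    // Int -> Bool: nonzero coerces to true, mirroring coerceToBool().
    table[Type::Int][Type::Bool] = [](const Value& v) {
        return Value(std::stoi(v) != 0 ? "true" : "false");
    };

    // A missing cell fails loudly, like the "Unsupported conversion"
    // uassert in the hunk above.
    auto lookup = [&](Type from, Type to) -> ConversionFunc {
        auto it = table[from].find(to);
        if (it == table[from].end())
            throw std::runtime_error("Unsupported conversion");
        return it->second;
    };

    std::cout << lookup(Type::Int, Type::Bool)("42") << "\n";  // prints: true
}
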
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 9b54e5e7995..78bd70c5e91 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -369,10 +369,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(28667,
str::stream() << "Expression " << this->getOpName() << " takes at least " << MinArgs
- << " arguments, and at most "
- << MaxArgs
- << ", but "
- << args.size()
+ << " arguments, and at most " << MaxArgs << ", but " << args.size()
<< " were passed in.",
MinArgs <= args.size() && args.size() <= MaxArgs);
}
@@ -388,9 +385,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(16020,
str::stream() << "Expression " << this->getOpName() << " takes exactly " << NArgs
- << " arguments. "
- << args.size()
- << " were passed in.",
+ << " arguments. " << args.size() << " were passed in.",
args.size() == NArgs);
}
};
@@ -613,9 +608,7 @@ public:
uassert(40533,
str::stream() << _opName
<< " requires a string for the timezone argument, but was given a "
- << typeName(timeZoneId.getType())
- << " ("
- << timeZoneId.toString()
+ << typeName(timeZoneId.getType()) << " (" << timeZoneId.toString()
<< ")",
timeZoneId.getType() == BSONType::String);
@@ -676,13 +669,12 @@ public:
} else {
uasserted(40535,
str::stream() << "unrecognized option to " << opName << ": \""
- << argName
- << "\"");
+ << argName << "\"");
}
}
uassert(40539,
- str::stream() << "missing 'date' argument to " << opName << ", provided: "
- << operatorElem,
+ str::stream() << "missing 'date' argument to " << opName
+ << ", provided: " << operatorElem,
date);
return new SubClass(expCtx, std::move(date), std::move(timeZone));
}
@@ -2718,4 +2710,4 @@ public:
using ExpressionRegex::ExpressionRegex;
};
-}
+} // namespace mongo
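
Most of the churn in this header is the newer clang-format joining str::stream() message chains onto fewer lines. For readers unfamiliar with the idiom, here is a rough sketch of how such a stream type can be built on std::ostringstream, used in an arity check shaped like validateArguments above; this is an approximation, not MongoDB's actual str::stream:

#include <cstddef>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// A tiny stream builder: operator<< returns *this and the result converts
// implicitly to std::string, so a whole message reads as one chain.
class StrStream {
public:
    template <typename T>
    StrStream& operator<<(const T& value) {
        _os << value;
        return *this;
    }
    operator std::string() const {
        return _os.str();
    }

private:
    std::ostringstream _os;
};

// An arity check in the style of validateArguments (names illustrative).
void validateArity(const std::vector<int>& args, std::size_t minArgs, std::size_t maxArgs) {
    if (args.size() < minArgs || args.size() > maxArgs)
        throw std::invalid_argument(StrStream() << "expression takes at least " << minArgs
                                                << " arguments, and at most " << maxArgs
                                                << ", but " << args.size() << " were passed in.");
}

int main() {
    validateArity({1, 2}, 1, 3);  // within bounds: no throw
    try {
        validateArity({}, 1, 3);  // throws with the composed message
    } catch (const std::invalid_argument&) {
        return 0;
    }
}
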
diff --git a/src/mongo/db/pipeline/expression_convert_test.cpp b/src/mongo/db/pipeline/expression_convert_test.cpp
index acee5cf618b..edd9f1f4ccf 100644
--- a/src/mongo/db/pipeline/expression_convert_test.cpp
+++ b/src/mongo/db/pipeline/expression_convert_test.cpp
@@ -80,8 +80,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnError) {
<< "$path1"
<< "to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -100,8 +99,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnNull) {
<< "$path1"
<< "to"
<< "int"
- << "onNull"
- << 0));
+ << "onNull" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -118,8 +116,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutInputFailsToParse) {
auto spec = BSON("$convert" << BSON("to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -134,8 +131,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutToFailsToParse) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -152,8 +148,7 @@ TEST_F(ExpressionConvertTest, InvalidTypeNameFails) {
<< "$path1"
<< "to"
<< "dinosaur"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -170,10 +165,7 @@ TEST_F(ExpressionConvertTest, NonIntegralTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 3.6
- << "onError"
- << 0));
+ << "to" << 3.6 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -195,8 +187,7 @@ TEST_F(ExpressionConvertTest, NonStringNonNumericalTypeFails) {
<< "to"
<< BSON("dinosaur"
<< "Tyrannosaurus rex")
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -215,10 +206,7 @@ TEST_F(ExpressionConvertTest, InvalidNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 100
- << "onError"
- << 0));
+ << "to" << 100 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -238,10 +226,7 @@ TEST_F(ExpressionConvertTest, NegativeNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << -2
- << "onError"
- << 0));
+ << "to" << -2 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -297,8 +282,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)));
+ << "to" << Value(targetTypeName)));
Document input{{"path1", inputValue}};
@@ -320,9 +304,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)
- << "onError"
+ << "to" << Value(targetTypeName) << "onError"
<< "X"));
Document input{{"path1", inputValue}};
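
The UnsupportedConversion cases above exercise $convert's runtime contract: a bad or unsupported conversion throws ConversionFailure unless an onError value was supplied, in which case that value is returned instead. A hedged sketch of just that control flow, with hypothetical names rather than the real expression classes:

#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

// Run a conversion; on failure return the onError fallback if one was
// provided, otherwise let the error surface, as the tests expect.
std::string convertOrOnError(const std::function<std::string()>& conversion,
                             const std::optional<std::string>& onError) {
    try {
        return conversion();
    } catch (const std::exception&) {
        if (onError)
            return *onError;
        throw;
    }
}

int main() {
    auto failing = []() -> std::string { throw std::runtime_error("ConversionFailure"); };
    std::cout << convertOrOnError(failing, std::string("X")) << "\n";  // prints: X
    try {
        convertOrOnError(failing, std::nullopt);  // no onError: rethrows
    } catch (const std::runtime_error&) {
        std::cout << "threw without onError\n";
    }
}
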
diff --git a/src/mongo/db/pipeline/expression_date_test.cpp b/src/mongo/db/pipeline/expression_date_test.cpp
index 67e798d17af..49099b47b36 100644
--- a/src/mongo/db/pipeline/expression_date_test.cpp
+++ b/src/mongo/db/pipeline/expression_date_test.cpp
@@ -46,14 +46,10 @@ TEST_F(ExpressionDateFromPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec =
- BSON("$dateFromParts" << BSON(
- "year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14 << "minute" << 37
- << "second"
- << 15
- << "millisecond"
- << 414
- << "timezone"
- << "America/Los_Angeles"));
+ BSON("$dateFromParts" << BSON("year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14
+ << "minute" << 37 << "second" << 15 << "millisecond"
+ << 414 << "timezone"
+ << "America/Los_Angeles"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateFromParts",
@@ -84,16 +80,15 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
    // Test that it becomes a constant if year, hour and minute are all provided, and are all
// expressions which evaluate to constants.
spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "hour"
- << BSON("$add" << BSON_ARRAY(13 << 1))
- << "minute"
+ << BSON("$add" << BSON_ARRAY(13 << 1)) << "minute"
<< BSON("$add" << BSON_ARRAY(40 << 3))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
    // Test that it becomes a constant if both year and milliseconds are provided, and year is an
    // expression which evaluates to a constant, with milliseconds a constant
- spec = BSON("$dateFromParts" << BSON(
- "year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "millisecond" << 514));
+ spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107))
+ << "millisecond" << 514));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -105,11 +100,10 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
    // Test that it becomes a constant if isoWeekYear, isoWeek and isoDayOfWeek are all provided,
    // and are all expressions which evaluate to constants.
- spec = BSON("$dateFromParts" << BSON("isoWeekYear" << BSON("$add" << BSON_ARRAY(1017 << 1000))
- << "isoWeek"
- << BSON("$add" << BSON_ARRAY(20 << 6))
- << "isoDayOfWeek"
- << BSON("$add" << BSON_ARRAY(3 << 2))));
+ spec = BSON("$dateFromParts" << BSON("isoWeekYear"
+ << BSON("$add" << BSON_ARRAY(1017 << 1000)) << "isoWeek"
+ << BSON("$add" << BSON_ARRAY(20 << 6)) << "isoDayOfWeek"
+ << BSON("$add" << BSON_ARRAY(3 << 2))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -117,8 +111,7 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// year is not a constant.
spec = BSON("$dateFromParts" << BSON("year"
<< "$year"
- << "month"
- << 6));
+ << "month" << 6));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -180,8 +173,7 @@ TEST_F(ExpressionDateToPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec = BSON("$dateToParts" << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "iso8601"
- << false));
+ << "iso8601" << false));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateToParts",
@@ -224,8 +216,7 @@ TEST_F(ExpressionDateToPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it becomes a constant if both date and iso8601 are provided, and are both
// expressions which evaluate to constants.
spec = BSON("$dateToParts" << BSON("date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
- << "iso8601"
- << BSON("$not" << false)));
+ << "iso8601" << BSON("$not" << false)));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -305,8 +296,7 @@ TEST_F(DateExpressionTest, ParsingRejectsUnrecognizedFieldsInObjectSpecification
for (auto&& expName : dateExpressions) {
BSONObj spec = BSON(expName << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "extra"
- << 4));
+ << "extra" << 4));
ASSERT_THROWS_CODE(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
40535);
@@ -561,8 +551,7 @@ TEST_F(DateExpressionTest, DoesResultInNullIfGivenNullishInput) {
    // Test that the expression results in null if the date and timezone are both nullish.
spec = BSON(expName << BSON("date"
<< "$missing"
- << "timezone"
- << BSONUndefined));
+ << "timezone" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate(contextDoc, &expCtx->variables));
@@ -619,8 +608,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// missing.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -628,9 +616,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -639,8 +625,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// expressions which evaluate to constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m%d"
- << "date"
- << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
+ << "date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
<< "timezone"
<< BSON("$concat" << BSON_ARRAY("Europe"
<< "/"
@@ -652,9 +637,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// 'onNull'.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"
<< "onNull"
<< "null default"));
@@ -676,9 +659,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// timezone is not a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "$tz"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -686,9 +667,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'onNull' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "onNull"
+ << "date" << Date_t{} << "onNull"
<< "$onNull"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -696,8 +675,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'format' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "$format"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
}
@@ -707,19 +685,14 @@ TEST_F(ExpressionDateToStringTest, ReturnsOnNullValueWhenInputIsNullish) {
auto spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
+ << "date" << BSONNULL << "onNull"
<< "null default"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
- << BSONNULL));
+ << "date" << BSONNULL << "onNull" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1074,15 +1047,13 @@ TEST_F(ExpressionDateFromStringTest, RejectsNonStringFormat) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "2017-07-13T10:02:57"
- << "format"
- << 2));
+ << "format" << 2));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
spec = BSON("$dateFromString" << BSON("dateString"
<< "July 4, 2017"
- << "format"
- << true));
+ << "format" << true));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
}
@@ -1126,8 +1097,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1140,8 +1110,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONUndefined));
+ << "format" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1265,8 +1234,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1285,8 +1253,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnError) {
<< "Invalid dateString"
<< "onError"
<< "Not used default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1303,8 +1270,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1323,8 +1289,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnError)
<< "Invalid dateString"
<< "onError"
<< "On error default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1341,15 +1306,13 @@ TEST_F(ExpressionDateFromStringTest, OnNullTakesPrecedenceOverOtherNullishParame
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1361,8 +1324,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1370,8 +1332,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1406,10 +1367,10 @@ TEST_F(ExpressionDateFromStringTest, ReturnsOnErrorForFormatMismatch) {
TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onNull"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onNull" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
@@ -1420,10 +1381,10 @@ TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
TEST_F(ExpressionDateFromStringTest, OnErrorEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onError"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onError" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index 34cd1a61335..98800c2af63 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -47,13 +47,13 @@ namespace ExpressionTests {
using boost::intrusive_ptr;
using std::initializer_list;
+using std::list;
using std::numeric_limits;
using std::pair;
using std::set;
using std::sort;
using std::string;
using std::vector;
-using std::list;
/**
* Creates an expression given by 'expressionName' and evaluates it using
@@ -590,8 +590,8 @@ TEST_F(ExpressionNaryTest, FlattenInnerOperandsOptimizationOnAssociativeOnlyMidd
intrusive_ptr<Expression> optimized = _associativeOnly->optimize();
ASSERT(_associativeOnly == optimized);
- BSONArray expectedContent = BSON_ARRAY(
- 200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1" << BSON_ARRAY(101 << 99) << "$path2");
+ BSONArray expectedContent = BSON_ARRAY(200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1"
+ << BSON_ARRAY(101 << 99) << "$path2");
assertContents(_associativeOnly, expectedContent);
}
@@ -737,12 +737,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatSimple) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "key1"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "key2"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("key1" << 2 << "key2" << 3))}}});
}
@@ -750,12 +748,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatWithDuplicates) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "hi"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "hi"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("hi" << 3))}}});
}
@@ -1888,8 +1884,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$and" << BSON_ARRAY("$a"
@@ -1901,8 +1896,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$const" << false);
@@ -3261,8 +3255,7 @@ TEST(ExpressionObjectParse, ShouldAcceptLiteralsAsValues) {
auto object = ExpressionObject::parse(expCtx,
BSON("a" << 5 << "b"
<< "string"
- << "c"
- << BSONNULL),
+ << "c" << BSONNULL),
vps);
auto expectedResult =
Value(Document{{"a", literal(5)}, {"b", literal("string"_sd)}, {"c", literal(BSONNULL)}});
@@ -3386,10 +3379,10 @@ auto expressionObjectCreateHelper(
expressionsWithChildrenInPlace) {
std::vector<boost::intrusive_ptr<Expression>> children;
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
- for (auto & [ unused, expression ] : expressionsWithChildrenInPlace)
+ for (auto& [unused, expression] : expressionsWithChildrenInPlace)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : expressionsWithChildrenInPlace) {
+ for (auto& [fieldName, unused] : expressionsWithChildrenInPlace) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -3840,8 +3833,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$const" << true);
@@ -3852,8 +3844,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$or" << BSON_ARRAY("$a"
@@ -4174,12 +4165,9 @@ class Same : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4187,12 +4175,9 @@ class Redundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4201,11 +4186,8 @@ class DoubleRedundant : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4213,12 +4195,9 @@ class Super : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4226,12 +4205,9 @@ class SuperWithRedundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4239,12 +4215,9 @@ class Sub : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4252,12 +4225,9 @@ class SameBackwards : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4265,12 +4235,9 @@ class NoOverlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << vector<Value>()
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1 << 2)));
+ << "$setIntersection" << vector<Value>()
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1 << 2)));
}
};
@@ -4278,12 +4245,9 @@ class Overlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1)));
+ << "$setIntersection" << DOC_ARRAY(2)
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1)));
}
};
@@ -4291,8 +4255,7 @@ class LastNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4303,8 +4266,7 @@ class FirstNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4315,8 +4277,7 @@ class LeftNullAndRightEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4327,8 +4288,7 @@ class RightNullAndLeftEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4375,12 +4335,8 @@ class LeftArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << true
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
+ << "$setEquals" << false << "$setDifference"
<< vector<Value>()));
}
};
@@ -4389,45 +4345,39 @@ class RightArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << false
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
+ << "$setEquals" << false << "$setDifference"
<< DOC_ARRAY(1 << 2)));
}
};
class ManyArgs : public ExpectedResultBase {
Document getSpec() {
- return DOC(
- "input" << DOC_ARRAY(DOC_ARRAY(8 << 3) << DOC_ARRAY("asdf"_sd
- << "foo"_sd)
- << DOC_ARRAY(80.3 << 34)
- << vector<Value>()
- << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
- << "expected"
- << DOC("$setIntersection" << vector<Value>() << "$setEquals" << false
- << "$setUnion"
- << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
- << "foo"_sd
- << "yay"_sd))
- << "error"
- << DOC_ARRAY("$setIsSubset"_sd
- << "$setDifference"_sd));
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
+ << DOC_ARRAY("asdf"_sd
+ << "foo"_sd)
+ << DOC_ARRAY(80.3 << 34) << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
+ << "expected"
+ << DOC("$setIntersection"
+ << vector<Value>() << "$setEquals" << false << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
+ << "foo"_sd
+ << "yay"_sd))
+ << "error"
+ << DOC_ARRAY("$setIsSubset"_sd
+ << "$setDifference"_sd));
}
};
class ManyArgsEqual : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4) << DOC_ARRAY(1 << 2 << 2 << 4)
- << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4))
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
+ << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4))
<< "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
- << true
- << "$setUnion"
+ << true << "$setUnion"
<< DOC_ARRAY(1 << 2 << 4))
<< "error"
<< DOC_ARRAY("$setIsSubset"_sd
@@ -4714,7 +4664,7 @@ TEST(ExpressionSubstrTest, ThrowsWithNegativeStart) {
ASSERT_THROWS([&] { expr->evaluate({}, &expCtx->variables); }(), AssertionException);
}
-} // namespace Substr
+} // namespace SubstrBytes
namespace SubstrCP {
@@ -4829,8 +4779,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
ASSERT_THROWS(Expression::parseExpression(expCtx,
BSON("$ltrim" << BSON("chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
ASSERT_THROWS(Expression::parseExpression(expCtx,
@@ -4838,8 +4787,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
<< "$x"
<< "chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
}
@@ -5357,8 +5305,7 @@ TEST(ExpressionTrimTest, DoesOptimizeToConstantWithCustomChars) {
expCtx,
BSON("$trim" << BSON("input"
<< " abc "
- << "chars"
- << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
+ << "chars" << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
expCtx->variablesParseState);
optimized = trim->optimize();
constant = dynamic_cast<ExpressionConstant*>(optimized.get());
@@ -5903,8 +5850,9 @@ class FalseViaInt : public ExpectedResultBase {
class Null : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(BSONNULL) << "error" << DOC_ARRAY("$allElementsTrue"_sd
- << "$anyElementTrue"_sd));
+ return DOC("input" << DOC_ARRAY(BSONNULL) << "error"
+ << DOC_ARRAY("$allElementsTrue"_sd
+ << "$anyElementTrue"_sd));
}
};
@@ -6491,5 +6439,5 @@ TEST(NowAndClusterTime, BasicTest) {
ASSERT_VALUE_EQ(result, Value{true});
}
}
-}
+} // namespace NowAndClusterTime
} // namespace ExpressionTests
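
Several hunks in this file and in expression.cpp change auto[a, b] to auto [a, b] and auto & [ k, v ] to auto& [k, v]; that is just the newer clang-format's spelling of C++17 structured bindings. A small sketch of the idiom itself, returning an anonymous struct and destructuring it the way CommonRegexParse's callers do (names are illustrative):

#include <iostream>
#include <map>
#include <string>

// Returning an unnamed struct lets callers destructure named fields,
// the same shape as `auto [input, regex, options] = CommonRegexParse(...)`.
auto parseRegexArgs(const std::string& input, const std::string& regex) {
    struct {
        std::string input;
        std::string regex;
        std::string options;
    } parsed{input, regex, ""};
    return parsed;
}

int main() {
    auto [input, regex, options] = parseRegexArgs("abc", "a.c");
    std::cout << input << " " << regex << " '" << options << "'\n";

    // Binding by reference in a loop, as in expressionObjectCreateHelper above.
    std::map<std::string, int> m{{"a", 1}, {"b", 2}};
    for (auto& [key, value] : m)
        value += 1;
    std::cout << m["a"] << m["b"] << "\n";  // prints: 23
}
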
diff --git a/src/mongo/db/pipeline/expression_trigonometric.h b/src/mongo/db/pipeline/expression_trigonometric.h
index 41f10ca2e29..cc8ca852f8b 100644
--- a/src/mongo/db/pipeline/expression_trigonometric.h
+++ b/src/mongo/db/pipeline/expression_trigonometric.h
@@ -135,12 +135,8 @@ public:
void assertBounds(T input) const {
uassert(50989,
str::stream() << "cannot apply " << getOpName() << " to " << toString(input)
- << ", value must in "
- << BoundType::leftBracket()
- << getLowerBound()
- << ","
- << getUpperBound()
- << BoundType::rightBracket(),
+ << ", value must in " << BoundType::leftBracket() << getLowerBound()
+ << "," << getUpperBound() << BoundType::rightBracket(),
checkBounds(input));
}
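
assertBounds above stitches its message from a BoundType policy that supplies '[' or '(' so the text shows whether each bound is inclusive. A hedged sketch of the same policy pattern, assuming an inclusive interval (names illustrative):

#include <iostream>
#include <sstream>
#include <stdexcept>

// Policy for an inclusive interval; an exclusive variant would use strict
// comparisons and "(" / ")" brackets instead.
struct InclusiveBound {
    static const char* leftBracket() { return "["; }
    static const char* rightBracket() { return "]"; }
    static bool check(double lo, double x, double hi) { return lo <= x && x <= hi; }
};

template <typename BoundType>
void assertBounds(const char* opName, double input, double lo, double hi) {
    if (!BoundType::check(lo, input, hi)) {
        std::ostringstream os;
        os << "cannot apply " << opName << " to " << input << ", value must be in "
           << BoundType::leftBracket() << lo << "," << hi << BoundType::rightBracket();
        throw std::domain_error(os.str());
    }
}

int main() {
    assertBounds<InclusiveBound>("$acos", 0.5, -1.0, 1.0);  // in bounds: no throw
    try {
        assertBounds<InclusiveBound>("$acos", 2.0, -1.0, 1.0);
    } catch (const std::domain_error& ex) {
        std::cout << ex.what() << "\n";
    }
}
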
diff --git a/src/mongo/db/pipeline/expression_trigonometric_test.cpp b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
index 49ea60e1f9b..b9356e60bae 100644
--- a/src/mongo/db/pipeline/expression_trigonometric_test.cpp
+++ b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
@@ -1403,4 +1403,4 @@ TEST(ExpressionDegreesToRadiansTest, DecimalArg) {
TEST(ExpressionDegreesToRadiansTest, NullArg) {
assertEvaluates("$degreesToRadians", Value(BSONNULL), Value(BSONNULL));
}
-} // namespace expression_trigonometric_test
+} // namespace expression_tests
diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp
index bb26fc478ca..4c9e23a86df 100644
--- a/src/mongo/db/pipeline/field_path.cpp
+++ b/src/mongo/db/pipeline/field_path.cpp
@@ -81,4 +81,4 @@ void FieldPath::uassertValidFieldName(StringData fieldName) {
uassert(
16412, "FieldPath field names may not contain '.'.", fieldName.find('.') == string::npos);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h
index 347b236fb6b..bbc775be9db 100644
--- a/src/mongo/db/pipeline/field_path.h
+++ b/src/mongo/db/pipeline/field_path.h
@@ -136,4 +136,4 @@ inline bool operator<(const FieldPath& lhs, const FieldPath& rhs) {
inline bool operator==(const FieldPath& lhs, const FieldPath& rhs) {
return lhs.fullPath() == rhs.fullPath();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
index 6db3d45ea78..56164da1b21 100644
--- a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
+++ b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
@@ -106,13 +106,9 @@ void testRoundingUpInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -140,15 +136,12 @@ void testRoundingUpInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -175,15 +168,11 @@ void testRoundingUpBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -212,14 +201,10 @@ void testRoundingUpBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> rounde
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding up the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -244,13 +229,9 @@ void testRoundingDownInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -277,15 +258,12 @@ void testRoundingDownInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder)
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -312,15 +290,11 @@ void testRoundingDownBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -349,14 +323,10 @@ void testRoundingDownBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> roun
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding down the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -383,13 +353,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
@@ -400,13 +366,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
multiplier *= 10.0;
@@ -430,12 +392,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
@@ -449,12 +408,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
multiplier.multiply(Decimal128(10));
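
The tests above walk a value through a preferred-number series at successive powers of ten (the "multiplier level") and check that rounding lands on the neighboring series member. A minimal sketch of rounding up within such a series, assuming the common 1-2-5 series; the real rounders support other series and a decimal path, so treat this purely as the shape of the algorithm:

#include <cmath>
#include <iostream>
#include <vector>

// Round 'value' up to the nearest s * 10^k with s drawn from {1, 2, 5}.
double roundUpPreferred(double value) {
    const std::vector<double> series{1.0, 2.0, 5.0};
    // Scale so that value falls within [multiplier, multiplier * 10).
    double multiplier = std::pow(10.0, std::floor(std::log10(value)));
    for (double s : series) {
        if (value <= s * multiplier)
            return s * multiplier;
    }
    // Past the last member: wrap around to the next power of ten,
    // the case testSeriesWrappingAround exercises.
    return series.front() * multiplier * 10.0;
}

int main() {
    std::cout << roundUpPreferred(3.0) << "\n";   // 5
    std::cout << roundUpPreferred(7.5) << "\n";   // 10
    std::cout << roundUpPreferred(42.0) << "\n";  // 50
}
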
diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.cpp b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
index 87aebb72238..28b5b133a65 100644
--- a/src/mongo/db/pipeline/lite_parsed_document_source.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
@@ -61,4 +61,4 @@ std::unique_ptr<LiteParsedDocumentSource> LiteParsedDocumentSource::parse(
return it->second(request, specElem);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
index b1802c91970..81a10467a58 100644
--- a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
@@ -54,8 +54,7 @@ void LiteParsedPipeline::assertSupportsReadConcern(
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Explain for the aggregate command cannot run with a readConcern "
<< "other than 'local', or in a multi-document transaction. Current "
- << "readConcern: "
- << readConcern.toString(),
+ << "readConcern: " << readConcern.toString(),
!explain || readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
for (auto&& spec : _stageSpecs) {
diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h
index 11c28852f34..54b75d03934 100644
--- a/src/mongo/db/pipeline/lookup_set_cache.h
+++ b/src/mongo/db/pipeline/lookup_set_cache.h
@@ -47,10 +47,10 @@
namespace mongo {
using boost::multi_index_container;
-using boost::multi_index::sequenced;
using boost::multi_index::hashed_unique;
-using boost::multi_index::member;
using boost::multi_index::indexed_by;
+using boost::multi_index::member;
+using boost::multi_index::sequenced;
/**
* A least-recently-used cache from key to a vector of values. It does not implement any default
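
lookup_set_cache.h builds its LRU cache on boost::multi_index, combining a sequenced index (recency order) with a hashed_unique index (key lookup); the hunk above merely reorders the using-declarations. The same structure can be sketched with only the standard library, using std::list for recency and an unordered_map of list iterators for O(1) lookup; this stand-in swaps out boost::multi_index, so it is the pattern, not the actual implementation:

#include <iostream>
#include <list>
#include <string>
#include <unordered_map>
#include <vector>

// A least-recently-used cache from key to a vector of values (capacity 2
// here just to make eviction visible).
class LookupSetCache {
public:
    void insert(const std::string& key, int value) {
        touch(key);
        _entries.front().second.push_back(value);
        if (_entries.size() > 2) {  // evict the least recently used entry
            _index.erase(_entries.back().first);
            _entries.pop_back();
        }
    }

    const std::vector<int>* get(const std::string& key) {
        auto it = _index.find(key);
        if (it == _index.end())
            return nullptr;
        touch(key);
        return &_entries.front().second;
    }

private:
    using Entry = std::pair<std::string, std::vector<int>>;

    // Move 'key' to the front of the recency list, creating it if absent.
    void touch(const std::string& key) {
        auto it = _index.find(key);
        if (it != _index.end())
            _entries.splice(_entries.begin(), _entries, it->second);
        else
            _entries.emplace_front(key, std::vector<int>{});
        _index[key] = _entries.begin();
    }

    std::list<Entry> _entries;  // front = most recently used
    std::unordered_map<std::string, std::list<Entry>::iterator> _index;
};

int main() {
    LookupSetCache cache;
    cache.insert("a", 1);
    cache.insert("b", 2);
    cache.insert("c", 3);  // capacity 2: evicts "a"
    std::cout << (cache.get("a") == nullptr) << " " << cache.get("b")->front() << "\n";  // 1 2
}
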
diff --git a/src/mongo/db/pipeline/mongos_process_interface.cpp b/src/mongo/db/pipeline/mongos_process_interface.cpp
index d91233c52db..05395e2c7ca 100644
--- a/src/mongo/db/pipeline/mongos_process_interface.cpp
+++ b/src/mongo/db/pipeline/mongos_process_interface.cpp
@@ -227,15 +227,12 @@ boost::optional<Document> MongoSInterface::lookupSingleDocument(
uassert(ErrorCodes::InternalError,
str::stream() << "Shard cursor was unexpectedly open after lookup: "
<< shardResult.front().getHostAndPort()
- << ", id: "
- << cursor.getCursorId(),
+ << ", id: " << cursor.getCursorId(),
cursor.getCursorId() == 0);
uassert(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching " << filter.toString() << " ["
- << batch.begin()->toString()
- << ", "
- << std::next(batch.begin())->toString()
- << "]",
+ << batch.begin()->toString() << ", "
+ << std::next(batch.begin())->toString() << "]",
batch.size() <= 1u);
return (!batch.empty() ? Document(batch.front()) : boost::optional<Document>{});
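
lookupSingleDocument above asserts that the shard cursor is exhausted and that the batch holds at most one match, naming the first two offenders in the TooManyMatchingDocuments message. A brief sketch of that last check (illustrative names):

#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Return the single matching document, nothing if none matched, and throw
// if more than one matched, quoting the first two as the uassert above does.
std::optional<std::string> lookupSingle(const std::vector<std::string>& batch,
                                        const std::string& filter) {
    if (batch.size() > 1) {
        std::ostringstream os;
        os << "found more than one document matching " << filter << " [" << batch[0] << ", "
           << batch[1] << "]";
        throw std::runtime_error(os.str());
    }
    if (batch.empty())
        return std::nullopt;
    return batch.front();
}

int main() {
    lookupSingle({"{_id: 1}"}, "{x: 1}");  // one match: returned
    lookupSingle({}, "{x: 1}");            // no match: nothing returned
    try {
        lookupSingle({"{_id: 1}", "{_id: 2}"}, "{x: 1}");
    } catch (const std::runtime_error&) {
        return 0;
    }
}
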
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
index 058e20b6d0b..75e0a95cd5d 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
@@ -81,11 +81,7 @@ void ProjectionSpecValidator::ensurePathDoesNotConflictOrThrow(const std::string
uassert(40176,
str::stream() << "specification contains two conflicting paths. "
"Cannot specify both '"
- << path
- << "' and '"
- << *conflictingPath
- << "': "
- << _rawObj.toString(),
+ << path << "' and '" << *conflictingPath << "': " << _rawObj.toString(),
!conflictingPath);
}
@@ -124,10 +120,8 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
uasserted(40181,
str::stream() << "an expression specification must contain exactly "
"one field, the name of the expression. Found "
- << thisLevelSpec.nFields()
- << " fields in "
- << thisLevelSpec.toString()
- << ", while parsing object "
+ << thisLevelSpec.nFields() << " fields in "
+ << thisLevelSpec.toString() << ", while parsing object "
<< _rawObj.toString());
}
ensurePathDoesNotConflictOrThrow(prefix.fullPath());
@@ -136,8 +130,7 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
if (fieldName.find('.') != std::string::npos) {
uasserted(40183,
str::stream() << "cannot use dotted field name '" << fieldName
- << "' in a sub object: "
- << _rawObj.toString());
+ << "' in a sub object: " << _rawObj.toString());
}
parseElement(elem, FieldPath::getFullyQualifiedPath(prefix.fullPath(), fieldName));
}
@@ -240,23 +233,25 @@ private:
} else if ((elem.isBoolean() || elem.isNumber()) && !elem.trueValue()) {
// If this is an excluded field other than '_id', ensure that the projection type has
// not already been set to kInclusionProjection.
- uassert(40178,
- str::stream() << "Bad projection specification, cannot exclude fields "
- "other than '_id' in an inclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kExclusionProjection));
+ uassert(
+ 40178,
+ str::stream() << "Bad projection specification, cannot exclude fields "
+ "other than '_id' in an inclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kExclusionProjection));
_parsedType = TransformerInterface::TransformerType::kExclusionProjection;
} else {
// A boolean true, a truthy numeric value, or any expression can only be used with an
// inclusion projection. Note that literal values like "string" or null are also treated
// as expressions.
- uassert(40179,
- str::stream() << "Bad projection specification, cannot include fields or "
- "add computed fields during an exclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kInclusionProjection));
+ uassert(
+ 40179,
+ str::stream() << "Bad projection specification, cannot include fields or "
+ "add computed fields during an exclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kInclusionProjection));
_parsedType = TransformerInterface::TransformerType::kInclusionProjection;
}
}
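
The reflowed uasserts above enforce $project's central rule: excluding any field other than _id commits the spec to an exclusion projection, while including or computing a field commits it to an inclusion projection, and mixing the two raises error 40178 or 40179. A compact sketch of that inference loop (hypothetical names):

#include <map>
#include <optional>
#include <stdexcept>
#include <string>

enum class ProjectionType { kInclusion, kExclusion };

// Infer the projection type from field -> truthiness, throwing on a mix;
// "_id" may be excluded regardless, as in the real validator.
ProjectionType inferType(const std::map<std::string, bool>& spec) {
    std::optional<ProjectionType> parsedType;
    for (const auto& [field, included] : spec) {
        if (field == "_id")
            continue;
        ProjectionType t = included ? ProjectionType::kInclusion : ProjectionType::kExclusion;
        if (parsedType && *parsedType != t)
            throw std::invalid_argument(
                "Bad projection specification: cannot mix inclusion and exclusion");
        parsedType = t;
    }
    return parsedType.value_or(ProjectionType::kExclusion);  // e.g. {_id: 0} alone
}

int main() {
    inferType({{"a", true}, {"b", true}, {"_id", false}});  // fine: inclusion
    try {
        inferType({{"a", true}, {"b", false}});  // throws: mixed
    } catch (const std::invalid_argument&) {
        return 0;
    }
}
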
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
index 15efa442726..27ce39b9c86 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
@@ -149,15 +149,13 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
// Then assert that we throw when we introduce a prefixed field.
ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true << "a.b c" << true << "a.b.d"
- << true)),
- AssertionException);
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(BSON(
- "a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b" << false << "a.b-c"
- << false)),
+ makeProjectionWithDefaultPolicies(BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(BSON("a.b.d" << false << "a.b c" << false
+ << "a.b?c" << false << "a.b"
+ << false << "a.b-c" << false)),
+ AssertionException);
// Adding the same field twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
@@ -168,34 +166,24 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
AssertionException);
// Mix of include/exclude and adding a shared prefix.
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true << "a.b c" << true
- << "a.b.d"
- << true)),
- AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(
+ BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
+ AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
- << false)),
+ << wrapInLiteral(0) << "a.b-c" << false)),
AssertionException);
// Adding a shared prefix twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b-c" << wrapInLiteral(1) << "a.b" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b c"
- << wrapInLiteral(1)
- << "a.b.d"
+ << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b.d"
<< wrapInLiteral(0))),
AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
+ << wrapInLiteral(1) << "a.b" << wrapInLiteral(0) << "a.b-c"
<< wrapInLiteral(1))),
AssertionException);
}
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
index 09d4b0cb4d6..f99a82c7546 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
@@ -240,10 +240,8 @@ TEST(InclusionProjectionExecutionTest, ShouldOptimizeNestedExpressions) {
TEST(InclusionProjectionExecutionTest, ShouldReportThatAllExceptIncludedFieldsAreModified) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
- inclusion.parse(BSON(
- "a" << wrapInLiteral("computedVal") << "b.c" << wrapInLiteral("computedVal") << "d" << true
- << "e.f"
- << true));
+ inclusion.parse(BSON("a" << wrapInLiteral("computedVal") << "b.c"
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -261,11 +259,7 @@ TEST(InclusionProjectionExecutionTest,
ShouldReportThatAllExceptIncludedFieldsAreModifiedWithIdExclusion) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
inclusion.parse(BSON("_id" << false << "a" << wrapInLiteral("computedVal") << "b.c"
- << wrapInLiteral("computedVal")
- << "d"
- << true
- << "e.f"
- << true));
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -573,11 +567,10 @@ TEST(InclusionProjectionExecutionTest, ShouldAllowMixedNestedAndDottedFields) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
// Include all of "a.b", "a.c", "a.d", and "a.e".
// Add new computed fields "a.W", "a.X", "a.Y", and "a.Z".
- inclusion.parse(BSON(
- "a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X" << wrapInLiteral("X")
- << "a"
- << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y") << "Z"
- << wrapInLiteral("Z"))));
+ inclusion.parse(BSON("a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X"
+ << wrapInLiteral("X") << "a"
+ << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y")
+ << "Z" << wrapInLiteral("Z"))));
auto result = inclusion.applyProjection(Document{
{"a",
Document{{"b", "b"_sd}, {"c", "c"_sd}, {"d", "d"_sd}, {"e", "e"_sd}, {"f", "f"_sd}}}});
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 73b14b80262..3337a79e4b8 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -169,9 +169,9 @@ void Pipeline::validateTopLevelPipeline() const {
if (nss.isCollectionlessAggregateNS() &&
!firstStageConstraints.isIndependentOfAnyCollection) {
uasserted(ErrorCodes::InvalidNamespace,
- str::stream() << "{aggregate: 1} is not valid for '"
- << _sources.front()->getSourceName()
- << "'; a collection is required.");
+ str::stream()
+ << "{aggregate: 1} is not valid for '"
+ << _sources.front()->getSourceName() << "'; a collection is required.");
}
if (!nss.isCollectionlessAggregateNS() &&
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index f133a67c9bc..3efb9b5813f 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -508,9 +508,9 @@ PipelineD::buildInnerQueryExecutorGeneric(Collection* collection,
(pipeline->peekFront() && pipeline->peekFront()->constraints().isChangeStreamStage());
auto attachExecutorCallback = [deps, queryObj, sortObj, projForQuery, trackOplogTS](
- Collection* collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline* pipeline) {
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceCursor::create(
collection, std::move(exec), pipeline->getContext(), trackOplogTS);
addCursorSource(
@@ -566,15 +566,14 @@ PipelineD::buildInnerQueryExecutorGeoNear(Collection* collection,
str::stream() << "Unexpectedly got the following sort from the query system: "
<< sortFromQuerySystem.jsonString());
- auto attachExecutorCallback =
- [
- deps,
- distanceField = geoNearStage->getDistanceField(),
- locationField = geoNearStage->getLocationField(),
- distanceMultiplier = geoNearStage->getDistanceMultiplier().value_or(1.0)
- ](Collection * collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline * pipeline) {
+ auto attachExecutorCallback = [deps,
+ distanceField = geoNearStage->getDistanceField(),
+ locationField = geoNearStage->getLocationField(),
+ distanceMultiplier =
+ geoNearStage->getDistanceMultiplier().value_or(1.0)](
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceGeoNearCursor::create(collection,
std::move(exec),
pipeline->getContext(),
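
The geoNear hunk applies the new lambda layout to a capture list with init-captures, including the defaulted `distanceMultiplier = ...value_or(1.0)`. A standalone C++17 sketch of an init-capture seeded from value_or (names are illustrative, not the PlanExecutor types above):

    #include <iostream>
    #include <optional>

    int main() {
        std::optional<double> configured;  // unset, so value_or supplies the default

        // Init-capture: the lambda owns its own copy, computed once at capture time.
        auto scale = [distanceMultiplier = configured.value_or(1.0)](double d) {
            return d * distanceMultiplier;
        };

        std::cout << scale(42.0) << '\n';  // prints 42 (multiplier defaulted to 1.0)
        return 0;
    }
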
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree.h b/src/mongo/db/pipeline/pipeline_metadata_tree.h
index fe8c1f02770..1a22c452590 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree.h
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree.h
@@ -117,8 +117,7 @@ inline auto findStageContents(const NamespaceString& ns,
auto it = initialStageContents.find(ns);
uassert(51213,
str::stream() << "Metadata to initialize an aggregation pipeline associated with "
- << ns.coll()
- << " is missing.",
+ << ns.coll() << " is missing.",
it != initialStageContents.end());
return it->second;
}
@@ -154,7 +153,7 @@ inline auto makeAdditionalChildren(
std::vector<T> offTheEndContents;
if (auto lookupSource = dynamic_cast<const DocumentSourceLookUp*>(&source);
lookupSource && lookupSource->wasConstructedWithPipelineSyntax()) {
- auto[child, offTheEndReshaper] =
+ auto [child, offTheEndReshaper] =
makeTreeWithOffTheEndStage(std::move(initialStageContents),
lookupSource->getResolvedIntrospectionPipeline(),
propagator);
@@ -166,7 +165,7 @@ inline auto makeAdditionalChildren(
facetSource->getFacetPipelines().end(),
std::back_inserter(children),
[&](const auto& fPipe) {
- auto[child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
+ auto [child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
std::move(initialStageContents), *fPipe.pipeline, propagator);
offTheEndContents.push_back(offTheEndReshaper(child.get().contents));
return std::move(*child);
@@ -192,13 +191,15 @@ inline auto makeStage(
auto contents = (previous) ? reshapeContents(previous.get().contents)
: findStageContents(source.getContext()->ns, initialStageContents);
- auto[additionalChildren, offTheEndContents] =
+ auto [additionalChildren, offTheEndContents] =
makeAdditionalChildren(std::move(initialStageContents), source, propagator, contents);
auto principalChild = previous ? std::make_unique<Stage<T>>(std::move(previous.get()))
: std::unique_ptr<Stage<T>>();
- std::function<T(const T&)> reshaper([&, offTheEndContents{std::move(offTheEndContents)} ](
- const T& reshapable) { return propagator(reshapable, offTheEndContents, source); });
+ std::function<T(const T&)> reshaper(
+ [&, offTheEndContents{std::move(offTheEndContents)}](const T& reshapable) {
+ return propagator(reshapable, offTheEndContents, source);
+ });
return std::pair(
boost::optional<Stage<T>>(
Stage(std::move(contents), std::move(principalChild), std::move(additionalChildren))),
@@ -278,7 +279,7 @@ inline std::pair<boost::optional<Stage<T>>, T> makeTree(
findStageContents(pipeline.getContext()->ns, initialStageContents));
}
- auto && [ finalStage, reshaper ] =
+ auto&& [finalStage, reshaper] =
detail::makeTreeWithOffTheEndStage(std::move(initialStageContents), pipeline, propagator);
return std::pair(std::move(*finalStage), reshaper(finalStage.get().contents));
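
Every hunk in this header is the same mechanical respelling: structured bindings are now written `auto [a, b]` / `auto&& [a, b]` rather than the older clang-format spelling `auto[ a, b ]` / `auto && [ a, b ]`. A minimal sketch of both binding forms:

    #include <string>
    #include <utility>

    std::pair<int, std::string> makeStage() {
        return {42, "stage"};
    }

    int main() {
        auto [id, name] = makeStage();      // new spelling: no space before '[', none inside
        auto&& [id2, name2] = makeStage();  // rvalue form, previously 'auto && [ a, b ]'
        return (id == id2 && name == name2) ? 0 : 1;
    }
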
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
index 25a161c2048..5a15074b361 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
@@ -129,7 +129,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
auto pipePtr = jsonToPipeline("[{$project: {name: 1}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{23}, {}, {}));
+ }()
+ .first.get() == Stage(TestThing{23}, {}, {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -137,7 +138,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
+ }()
+ .first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -149,7 +151,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{28},
makeUniqueStage(
TestThing{27},
@@ -247,7 +250,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
{NamespaceString("test.instruments"), {"2"}}},
*pipePtr,
buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"1mpxul[2m]ulu"},
makeUniqueStage(
TestThing{"1mpxul[2m]ul"},
@@ -283,7 +287,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
"{$limit: 12}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), {""}}}, *pipePtr, buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"f[tugs, tmgs, tb]"},
makeUniqueStage(
TestThing{""},
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index b81ee5a435d..0cb6ebc6ec9 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -187,7 +187,7 @@ Update MongoInterfaceStandalone::buildUpdateOp(
for (auto&& obj : batch) {
updateEntries.push_back([&] {
UpdateOpEntry entry;
- auto && [ q, u, c ] = obj;
+ auto&& [q, u, c] = obj;
entry.setQ(std::move(q));
entry.setU(std::move(u));
entry.setC(std::move(c));
@@ -339,8 +339,7 @@ void MongoInterfaceStandalone::renameIfOptionsAndIndexesHaveNotChanged(
str::stream() << "collection options of target collection " << targetNs.ns()
<< " changed during processing. Original options: "
<< originalCollectionOptions
- << ", new options: "
- << getCollectionOptions(targetNs),
+ << ", new options: " << getCollectionOptions(targetNs),
SimpleBSONObjComparator::kInstance.evaluate(originalCollectionOptions ==
getCollectionOptions(targetNs)));
@@ -465,12 +464,8 @@ boost::optional<Document> MongoInterfaceStandalone::lookupSingleDocument(
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document with document key "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
// Set the speculative read timestamp appropriately after we do a document lookup locally. We
@@ -620,14 +615,12 @@ void MongoInterfaceStandalone::_reportCurrentOpsForIdleSessions(OperationContext
? makeSessionFilterForAuthenticatedUsers(opCtx)
: KillAllSessionsByPatternSet{{}});
- sessionCatalog->scanSessions(
- {std::move(sessionFilter)},
- [&](const ObservableSession& session) {
- auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
- if (!op.isEmpty()) {
- ops->emplace_back(op);
- }
- });
+ sessionCatalog->scanSessions({std::move(sessionFilter)}, [&](const ObservableSession& session) {
+ auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
+ if (!op.isEmpty()) {
+ ops->emplace_back(op);
+ }
+ });
}
std::unique_ptr<CollatorInterface> MongoInterfaceStandalone::_getCollectionDefaultCollator(
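
The scanSessions hunk folds the lambda argument back onto the call line; the callback body is untouched. A sketch of the shape with hypothetical stand-ins for the session types (a scan function taking a std::function, and a capturing lambda that collects non-empty results):

    #include <functional>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for SessionCatalog / ObservableSession.
    struct Session {
        std::string stashedState;
    };

    void scanSessions(const std::vector<Session>& sessions,
                      const std::function<void(const Session&)>& worker) {
        for (const auto& session : sessions)
            worker(session);
    }

    int main() {
        std::vector<Session> sessions{{""}, {"op1"}, {"op2"}};
        std::vector<std::string> ops;
        scanSessions(sessions, [&](const Session& session) {
            if (!session.stashedState.empty())  // skip sessions with nothing stashed
                ops.push_back(session.stashedState);
        });
        return ops.size() == 2 ? 0 : 1;
    }
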
diff --git a/src/mongo/db/pipeline/process_interface_standalone_test.cpp b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
index fa246fc2e9d..e522111e395 100644
--- a/src/mongo/db/pipeline/process_interface_standalone_test.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
@@ -93,7 +93,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
// Test that 'targetCollectionVersion' is accepted if from mongos.
expCtx->fromMongos = true;
- auto[joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
+ auto [joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, {{"_id"}}, targetCollectionVersion, expCtx->ns);
ASSERT_EQ(joinKey.size(), 1UL);
ASSERT_EQ(joinKey.count(FieldPath("_id")), 1UL);
diff --git a/src/mongo/db/pipeline/resume_token.cpp b/src/mongo/db/pipeline/resume_token.cpp
index 48bb9ee1678..bc9787c4d92 100644
--- a/src/mongo/db/pipeline/resume_token.cpp
+++ b/src/mongo/db/pipeline/resume_token.cpp
@@ -90,8 +90,9 @@ ResumeToken::ResumeToken(const Document& resumeDoc) {
_typeBits = resumeDoc[kTypeBitsFieldName];
uassert(40648,
str::stream() << "Bad resume token: _typeBits of wrong type " << resumeDoc.toString(),
- _typeBits.missing() || (_typeBits.getType() == BSONType::BinData &&
- _typeBits.getBinData().type == BinDataGeneral));
+ _typeBits.missing() ||
+ (_typeBits.getType() == BSONType::BinData &&
+ _typeBits.getBinData().type == BinDataGeneral));
}
// We encode the resume token as a KeyString with the sequence:
diff --git a/src/mongo/db/pipeline/resume_token_test.cpp b/src/mongo/db/pipeline/resume_token_test.cpp
index 72894880953..d684e30cc26 100644
--- a/src/mongo/db/pipeline/resume_token_test.cpp
+++ b/src/mongo/db/pipeline/resume_token_test.cpp
@@ -360,5 +360,5 @@ TEST(ResumeToken, StringEncodingSortsCorrectly) {
{ts10_4, 0, 0, lower_uuid, Value(Document{{"_id", 0}})});
}
-} // namspace
-} // namspace mongo
+} // namespace
+} // namespace mongo
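
This hunk is a typo fix rather than a reflow: the closing-brace comments read `namspace`. The convention applied across this patch (see also value.h, variables.cpp, and canonical_query_encoder.h below) is that every closing namespace brace carries a matching `// namespace <name>` comment:

    namespace mongo {
    namespace {
    // ... file-local helpers ...
    } // namespace
    } // namespace mongo
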
diff --git a/src/mongo/db/pipeline/semantic_analysis.cpp b/src/mongo/db/pipeline/semantic_analysis.cpp
index 256fbf9acbf..e44b0af957a 100644
--- a/src/mongo/db/pipeline/semantic_analysis.cpp
+++ b/src/mongo/db/pipeline/semantic_analysis.cpp
@@ -92,7 +92,7 @@ StringMap<std::string> computeNamesAssumingAnyPathsNotRenamedAreUnmodified(
StringMap<std::string> invertRenameMap(const StringMap<std::string>& originalMap) {
StringMap<std::string> reversedMap;
- for (auto && [ newName, oldName ] : originalMap) {
+ for (auto&& [newName, oldName] : originalMap) {
reversedMap[oldName] = newName;
}
return reversedMap;
@@ -206,7 +206,7 @@ boost::optional<StringMap<std::string>> renamedPaths(const std::set<std::string>
}
case DocumentSource::GetModPathsReturn::Type::kAllExcept: {
auto preservedPaths = modifiedPathsRet.paths;
- for (auto && [ newName, oldName ] : modifiedPathsRet.renames) {
+ for (auto&& [newName, oldName] : modifiedPathsRet.renames) {
// For the purposes of checking which paths are modified, consider renames to
// preserve the path. We'll circle back later to figure out the new name if
// appropriate. If we are going forward, we want to consider the name of the path
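
Both hunks here are the structured-bindings respelling again, this time iterating map entries. Since invertRenameMap is self-contained, a runnable sketch using only the standard library (StringMap approximated with std::unordered_map):

    #include <string>
    #include <unordered_map>

    using StringMap = std::unordered_map<std::string, std::string>;

    // Swap keys and values: {new -> old} becomes {old -> new}. If two new names
    // map to the same old name, the entry visited last wins.
    StringMap invertRenameMap(const StringMap& originalMap) {
        StringMap reversedMap;
        for (auto&& [newName, oldName] : originalMap) {
            reversedMap[oldName] = newName;
        }
        return reversedMap;
    }

    int main() {
        auto inverted = invertRenameMap({{"renamed", "original"}});
        return inverted.at("original") == "renamed" ? 0 : 1;
    }
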
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index afff96bab8b..aac03df6de4 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/cluster_commands_helpers.h"
-#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/query/cluster_query_knobs_gen.h"
#include "mongo/s/query/document_source_merge_cursors.h"
#include "mongo/util/fail_point.h"
@@ -113,8 +112,7 @@ BSONObj genericTransformForShards(MutableDocument&& cmdForShards,
invariant(cmdForShards.peek()[OperationSessionInfo::kTxnNumberFieldName].missing(),
str::stream() << "Command for shards unexpectedly had the "
<< OperationSessionInfo::kTxnNumberFieldName
- << " field set: "
- << cmdForShards.peek().toString());
+ << " field set: " << cmdForShards.peek().toString());
cmdForShards[OperationSessionInfo::kTxnNumberFieldName] =
Value(static_cast<long long>(*opCtx->getTxnNumber()));
}
@@ -332,9 +330,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
shardQuery);
invariant(cursors.size() % shardIds.size() == 0,
str::stream() << "Number of cursors (" << cursors.size()
- << ") is not a multiple of producers ("
- << shardIds.size()
- << ")");
+ << ") is not a multiple of producers (" << shardIds.size() << ")");
}
// Convert remote cursors into a vector of "owned" cursors.
@@ -346,9 +342,9 @@ DispatchShardPipelineResults dispatchShardPipeline(
// Record the number of shards involved in the aggregation. If we are required to merge on
// the primary shard, but the primary shard was not in the set of targeted shards, then we
// must increment the number of involved shards.
- CurOp::get(opCtx)->debug().nShards =
- shardIds.size() + (needsPrimaryShardMerge && executionNsRoutingInfo &&
- !shardIds.count(executionNsRoutingInfo->db().primaryId()));
+ CurOp::get(opCtx)->debug().nShards = shardIds.size() +
+ (needsPrimaryShardMerge && executionNsRoutingInfo &&
+ !shardIds.count(executionNsRoutingInfo->db().primaryId()));
return DispatchShardPipelineResults{needsPrimaryShardMerge,
std::move(ownedCursors),
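
One substantive cleanup hides among the reflows in this file: the duplicated #include of cluster_commands_helpers.h is dropped. A duplicate include is harmless, because the header's guard makes the second expansion a no-op, but it is redundant; schematically:

    // cluster_commands_helpers.h (schematic)
    #pragma once  // a second #include of this header in the same TU expands to nothing
    // ... declarations ...
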
diff --git a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
index a5b877a9e49..506acd514e8 100644
--- a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
+++ b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
@@ -93,12 +93,8 @@ boost::optional<Document> StubMongoProcessInterfaceLookupSingleDocument::lookupS
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
return lookedUpDocument;
}
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 52a1c5fd71d..b804adaf797 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -389,8 +389,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
if (getType() == BSONType::Object) {
@@ -411,8 +410,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
void Value::addToBsonArray(BSONArrayBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
// If this Value is empty, do nothing to avoid incrementing the builder's counter.
@@ -704,7 +702,7 @@ int Value::compare(const Value& rL,
case Date: // signed
return cmp(rL._storage.dateValue, rR._storage.dateValue);
- // Numbers should compare by equivalence even if different types
+ // Numbers should compare by equivalence even if different types
case NumberDecimal: {
switch (rType) {
@@ -1078,9 +1076,9 @@ size_t Value::getApproximateSize() const {
case Symbol:
case BinData:
case String:
- return sizeof(Value) + (_storage.shortStr
- ? 0 // string stored inline, so no extra mem usage
- : sizeof(RCString) + _storage.getString().size());
+ return sizeof(Value) +
+ (_storage.shortStr ? 0 // string stored inline, so no extra mem usage
+ : sizeof(RCString) + _storage.getString().size());
case Object:
return sizeof(Value) + getDocument().getApproximateSize();
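
The getApproximateSize hunk reflows a ternary whose logic is worth spelling out: inline short strings cost nothing beyond sizeof(Value), while heap-backed strings add a control block plus the character data. A sketch of that accounting under stated assumptions (RCString is approximated by a hypothetical header struct; sizes are illustrative, not MongoDB's real layout):

    #include <cstddef>
    #include <string>

    struct RCStringHeader {  // hypothetical stand-in for the refcounted string header
        int refCount;
        std::size_t len;
    };

    struct FakeValue {
        bool shortStr;     // true when the payload is stored inline in the Value itself
        std::string data;  // the (possibly heap-backed) payload
    };

    std::size_t approximateSize(const FakeValue& v) {
        return sizeof(FakeValue) +
            (v.shortStr ? 0  // string stored inline, so no extra memory usage
                        : sizeof(RCStringHeader) + v.data.size());
    }

    int main() {
        FakeValue v{false, std::string(64, 'x')};
        return approximateSize(v) > sizeof(FakeValue) ? 0 : 1;
    }
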
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index ef0ac8b6afd..296d6d08480 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -146,7 +146,7 @@ public:
 * Used when performing arithmetic operations with int where the
* result may be too large and need to be stored as long. The Value
* will be an int if value fits, otherwise it will be a long.
- */
+ */
static Value createIntOrLong(long long value);
/** A "missing" value indicates the lack of a Value.
@@ -396,7 +396,7 @@ public:
return Value(values);
}
};
-}
+} // namespace mongo
/* ======================= INLINED IMPLEMENTATIONS ========================== */
diff --git a/src/mongo/db/pipeline/variables.cpp b/src/mongo/db/pipeline/variables.cpp
index cf6b81e9605..8a37fecc10f 100644
--- a/src/mongo/db/pipeline/variables.cpp
+++ b/src/mongo/db/pipeline/variables.cpp
@@ -68,9 +68,7 @@ void Variables::uassertValidNameForUserWrite(StringData varName) {
uassert(16868,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -95,9 +93,7 @@ void Variables::uassertValidNameForUserRead(StringData varName) {
uassert(16871,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -258,4 +254,4 @@ std::set<Variables::Id> VariablesParseState::getDefinedVariableIDs() const {
return ids;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/variables.h b/src/mongo/db/pipeline/variables.h
index 2a606ac0572..ddb76457cac 100644
--- a/src/mongo/db/pipeline/variables.h
+++ b/src/mongo/db/pipeline/variables.h
@@ -157,7 +157,7 @@ private:
void setValue(Id id, const Value& value, bool isConstant);
static auto getBuiltinVariableName(Variables::Id variable) {
- for (auto & [ name, id ] : kBuiltinVarNameToId) {
+ for (auto& [name, id] : kBuiltinVarNameToId) {
if (variable == id) {
return name;
}
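
Same respelling in getBuiltinVariableName, a linear reverse lookup over the name-to-id table. A standalone sketch (the table entries here are hypothetical):

    #include <string>
    #include <utility>
    #include <vector>

    using Id = int;

    // Hypothetical table entries; the real list lives in variables.h.
    const std::vector<std::pair<std::string, Id>> kBuiltinVarNameToId = {{"NOW", 0},
                                                                         {"CLUSTER_TIME", 1}};

    std::string getBuiltinVariableName(Id variable) {
        for (auto& [name, id] : kBuiltinVarNameToId) {
            if (variable == id)
                return name;
        }
        return "";
    }

    int main() {
        return getBuiltinVariableName(1) == "CLUSTER_TIME" ? 0 : 1;
    }
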
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 91982bff80a..6698e56766c 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -427,10 +427,10 @@ void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) {
}
/**
-* Encodes sort order into cache key.
-* Sort order is normalized because it provided by
-* QueryRequest.
-*/
+ * Encodes sort order into cache key.
+ * Sort order is normalized because it is provided by
+ * QueryRequest.
+ */
void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
if (sortObj.isEmpty()) {
return;
@@ -463,12 +463,12 @@ void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
}
/**
-* Encodes parsed projection into cache key.
-* Does a simple toString() on each projected field
-* in the BSON object.
-* Orders the encoded elements in the projection by field name.
-* This handles all the special projection types ($meta, $elemMatch, etc.)
-*/
+ * Encodes parsed projection into cache key.
+ * Does a simple toString() on each projected field
+ * in the BSON object.
+ * Orders the encoded elements in the projection by field name.
+ * This handles all the special projection types ($meta, $elemMatch, etc.)
+ */
void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) {
// Sorts the BSON elements by field name using a map.
std::map<StringData, BSONElement> elements;
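
The two comment hunks in this file re-indent Doxygen-style block comments so the interior `*` lines sit one column in from the opening slash; the house style is:

    /**
     * Summary line.
     * Continuation lines keep their leading '*' aligned, one column in from the slash.
     */
    void encodeKeyForSort();  // illustrative declaration only
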
diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h
index d0019ba08c9..73c0eff5fa7 100644
--- a/src/mongo/db/query/canonical_query_encoder.h
+++ b/src/mongo/db/query/canonical_query_encoder.h
@@ -45,5 +45,5 @@ CanonicalQuery::QueryShapeString encode(const CanonicalQuery& cq);
* Returns a hash of the given key (produced from either a QueryShapeString or a PlanCacheKey).
*/
uint32_t computeHash(StringData key);
-}
-}
+} // namespace canonical_query_encoder
+} // namespace mongo
diff --git a/src/mongo/db/query/collation/collation_index_key.cpp b/src/mongo/db/query/collation/collation_index_key.cpp
index 48d971d3f7e..44b647044ca 100644
--- a/src/mongo/db/query/collation/collation_index_key.cpp
+++ b/src/mongo/db/query/collation/collation_index_key.cpp
@@ -114,9 +114,7 @@ void translateElement(StringData fieldName,
uasserted(ErrorCodes::CannotBuildIndexKeys,
str::stream()
<< "Cannot index type Symbol with a collation. Failed to index element: "
- << element
- << ". Index collation: "
- << collator->getSpec().toBSON());
+ << element << ". Index collation: " << collator->getSpec().toBSON());
}
default:
out->appendAs(element, fieldName);
@@ -144,7 +142,7 @@ void translate(BSONObj obj, const CollatorInterface* collator, BufBuilder* out)
element.fieldNameStringData(), element, collator, &ctx.getBuilder(), &ctxStack);
}
}
-}
+} // namespace
void CollationIndexKey::collationAwareIndexKeyAppend(BSONElement elt,
const CollatorInterface* collator,
diff --git a/src/mongo/db/query/collation/collation_index_key_test.cpp b/src/mongo/db/query/collation/collation_index_key_test.cpp
index 7696561060a..20a788d7df4 100644
--- a/src/mongo/db/query/collation/collation_index_key_test.cpp
+++ b/src/mongo/db/query/collation/collation_index_key_test.cpp
@@ -171,8 +171,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendThrowsIfSymbolInsideObject) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
BSONObj dataObj = BSON("" << BSON("a"
<< "foo"
- << "b"
- << BSONSymbol("mySymbol")));
+ << "b" << BSONSymbol("mySymbol")));
BSONObjBuilder out;
ASSERT_THROWS_CODE(
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out),
diff --git a/src/mongo/db/query/collation/collation_spec_test.cpp b/src/mongo/db/query/collation/collation_spec_test.cpp
index 8036e463a54..c255476292e 100644
--- a/src/mongo/db/query/collation/collation_spec_test.cpp
+++ b/src/mongo/db/query/collation/collation_spec_test.cpp
@@ -185,23 +185,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesDefaults) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -215,23 +205,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstUpper) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "upper"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -245,23 +225,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstLower) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "lower"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -275,23 +245,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesPrimaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -305,23 +265,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesSecondaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 2
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 2 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -335,23 +285,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesQuaternaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 4
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 4 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -365,23 +305,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesIdenticalStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 5
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 5 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -395,23 +325,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesAlternateShifted) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "shifted"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -425,23 +345,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesMaxVariableSpace) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "space"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
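
Every hunk in this test file collapses the same BSON(...) literal. The macro's operator<< alternates field names and values, so `<< "caseLevel" << false` is one key/value pair, and the new formatter packs several pairs per line while still breaking before string-typed values. A toy builder illustrating the alternating name/value stream (a simplified illustration, not the real BSONObjBuilder):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Toy alternating key/value stream: odd-numbered operands are field names,
    // even-numbered operands are the corresponding values.
    class ToyBuilder {
    public:
        template <typename T>
        ToyBuilder& operator<<(const T& token) {
            out_ << (expectingKey_ ? sep_ : std::string(": ")) << token;
            sep_ = ", ";
            expectingKey_ = !expectingKey_;
            return *this;
        }

        std::string str() const {
            return "{" + out_.str() + "}";
        }

    private:
        std::ostringstream out_;
        std::string sep_;
        bool expectingKey_ = true;
    };

    int main() {
        ToyBuilder b;
        b << "locale"
          << "myLocale"
          << "caseLevel" << false << "strength" << 3;
        std::cout << b.str() << '\n';  // {locale: myLocale, caseLevel: 0, strength: 3}
        return 0;
    }
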
diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp
index f9662b0a22a..7f612265b71 100644
--- a/src/mongo/db/query/collation/collator_factory_icu.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu.cpp
@@ -186,13 +186,9 @@ StatusWith<CollationSpec::CaseFirstType> stringToCaseFirstType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kCaseFirstField << "' must be '"
- << CollationSpec::kCaseFirstUpper
- << "', '"
- << CollationSpec::kCaseFirstLower
- << "', or '"
- << CollationSpec::kCaseFirstOff
- << "'. Got: "
- << caseFirst};
+ << CollationSpec::kCaseFirstUpper << "', '"
+ << CollationSpec::kCaseFirstLower << "', or '"
+ << CollationSpec::kCaseFirstOff << "'. Got: " << caseFirst};
}
}
@@ -211,8 +207,7 @@ StatusWith<CollationSpec::StrengthType> integerToStrengthType(long long strength
}
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kStrengthField
- << "' must be an integer 1 through 5. Got: "
- << strength};
+ << "' must be an integer 1 through 5. Got: " << strength};
}
StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string& alternate) {
@@ -223,11 +218,8 @@ StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kAlternateField << "' must be '"
- << CollationSpec::kAlternateNonIgnorable
- << "' or '"
- << CollationSpec::kAlternateShifted
- << "'. Got: "
- << alternate};
+ << CollationSpec::kAlternateNonIgnorable << "' or '"
+ << CollationSpec::kAlternateShifted << "'. Got: " << alternate};
}
}
@@ -239,11 +231,8 @@ StatusWith<CollationSpec::MaxVariableType> stringToMaxVariableType(const std::st
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kMaxVariableField << "' must be '"
- << CollationSpec::kMaxVariablePunct
- << "' or '"
- << CollationSpec::kMaxVariableSpace
- << "'. Got: "
- << maxVariable};
+ << CollationSpec::kMaxVariablePunct << "' or '"
+ << CollationSpec::kMaxVariableSpace << "'. Got: " << maxVariable};
}
}
@@ -273,10 +262,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseLevelField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseLevel = attributeToBool(caseLevelAttribute);
} else if (!parseStatus.isOK()) {
@@ -290,10 +277,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseLevelField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -308,10 +293,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseFirstField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseFirst = getCaseFirstFromAttribute(caseFirstAttribute);
} else if (!parseStatus.isOK()) {
@@ -333,10 +316,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseFirstField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -351,10 +332,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kStrengthField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.strength = getStrengthFromAttribute(strengthAttribute);
} else if (!parseStatus.isOK()) {
@@ -375,10 +354,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kStrengthField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -394,10 +371,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNumericOrderingField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.numericOrdering = attributeToBool(numericOrderingAttribute);
} else if (!parseStatus.isOK()) {
@@ -412,10 +387,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNumericOrderingField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -431,10 +404,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kAlternateField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.alternate = getAlternateFromAttribute(alternateAttribute);
} else if (!parseStatus.isOK()) {
@@ -456,10 +427,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kAlternateField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -486,10 +455,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kMaxVariableField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -505,10 +472,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNormalizationField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.normalization = attributeToBool(normalizationAttribute);
} else if (!parseStatus.isOK()) {
@@ -523,10 +488,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNormalizationField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -542,10 +505,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kBackwardsField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.backwards = attributeToBool(backwardsAttribute);
} else if (!parseStatus.isOK()) {
@@ -560,10 +521,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kBackwardsField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -585,9 +544,7 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
return {ErrorCodes::IncompatibleCollationVersion,
str::stream() << "Requested collation version " << specVersionStr
<< " but the only available collator version was "
- << parsedSpec.version
- << ". Requested collation spec: "
- << spec};
+ << parsedSpec.version << ". Requested collation spec: " << spec};
}
++parsedFields;
@@ -613,8 +570,7 @@ StatusWith<std::string> parseLocaleID(const BSONObj& spec) {
if (localeID.find('\0') != std::string::npos) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot contain null byte. Collation spec: "
- << spec};
+ << "' cannot contain null byte. Collation spec: " << spec};
}
return localeID;
}
@@ -630,15 +586,13 @@ Status validateLocaleID(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get locale from icu::Collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
if (originalID.empty()) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot be the empty string in: "
- << spec};
+ << "' cannot be the empty string in: " << spec};
}
// Check that each component of the locale ID is recognized by ICU. If ICU 1) cannot parse the
@@ -669,11 +623,9 @@ Status validateCollationSpec(const CollationSpec& spec) {
if (spec.backwards && spec.strength == CollationSpec::StrengthType::kPrimary) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kBackwardsField << "' is invalid with '"
- << CollationSpec::kStrengthField
- << "' of "
+ << CollationSpec::kStrengthField << "' of "
<< static_cast<int>(CollationSpec::StrengthType::kPrimary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
// The caseFirst option only affects tertiary level or caseLevel comparisons. It will have no
@@ -683,13 +635,10 @@ Status validateCollationSpec(const CollationSpec& spec) {
spec.strength == CollationSpec::StrengthType::kSecondary)) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kCaseFirstField << "' is invalid unless '"
- << CollationSpec::kCaseLevelField
- << "' is on or '"
- << CollationSpec::kStrengthField
- << "' is greater than "
+ << CollationSpec::kCaseLevelField << "' is on or '"
+ << CollationSpec::kStrengthField << "' is greater than "
<< static_cast<int>(CollationSpec::StrengthType::kSecondary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
return Status::OK();
@@ -712,8 +661,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
return {ErrorCodes::FailedToParse,
str::stream() << "If " << CollationSpec::kLocaleField << "="
<< CollationSpec::kSimpleBinaryComparison
- << ", no other fields should be present in: "
- << spec};
+ << ", no other fields should be present in: " << spec};
}
return {nullptr};
}
@@ -722,8 +670,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
auto userLocale = icu::Locale::createFromName(parsedLocaleID.getValue().c_str());
if (userLocale.isBogus()) {
return {ErrorCodes::BadValue,
- str::stream() << "Field '" << CollationSpec::kLocaleField << "' is not valid in: "
- << spec};
+ str::stream() << "Field '" << CollationSpec::kLocaleField
+ << "' is not valid in: " << spec};
}
// Construct an icu::Collator.
@@ -734,8 +682,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to create collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
Status localeValidationStatus = validateLocaleID(spec, parsedLocaleID.getValue(), *icuCollator);
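
Every hunk in this file reflows the message inside a `return {ErrorCode, str::stream() << ...}` error path. A minimal sketch of that StatusWith-style return, approximated with std::variant (an illustration of the shape, not MongoDB's StatusWith):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <variant>

    struct Error {
        int code;
        std::string reason;
    };

    template <typename T>
    using StatusWith = std::variant<Error, T>;  // hypothetical stand-in, not MongoDB's type

    StatusWith<int> integerToStrengthType(long long strength) {
        if (strength >= 1 && strength <= 5)
            return static_cast<int>(strength);
        std::ostringstream msg;  // plays the role of str::stream()
        msg << "Field 'strength' must be an integer 1 through 5. Got: " << strength;
        return Error{1 /* FailedToParse-like code */, msg.str()};
    }

    int main() {
        auto sw = integerToStrengthType(6);
        if (auto* err = std::get_if<Error>(&sw))
            std::cout << err->reason << '\n';
        return 0;
    }
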
diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
index 654f4f4c7b3..6eacae4c5a1 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
@@ -40,9 +40,7 @@ namespace mongo {
namespace {
ServiceContext::ConstructorActionRegisterer registerIcuCollator{
- "CreateCollatorFactory",
- {"LoadICUData"},
- [](ServiceContext* service) {
+ "CreateCollatorFactory", {"LoadICUData"}, [](ServiceContext* service) {
CollatorFactoryInterface::set(service, std::make_unique<CollatorFactoryICU>());
}};
} // namespace
diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
index 7024303dffd..d7d0edce54b 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
@@ -61,8 +61,7 @@ TEST(CollatorFactoryICUTest, SimpleLocaleWithOtherFieldsFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "simple"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -444,8 +443,7 @@ TEST(CollatorFactoryICUTest, CaseLevelFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << false));
+ << "caseLevel" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().caseLevel);
}
@@ -454,8 +452,7 @@ TEST(CollatorFactoryICUTest, CaseLevelTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().caseLevel);
}
@@ -497,8 +494,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -508,8 +504,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kSecondary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -519,8 +514,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kTertiary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -530,8 +524,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4));
+ << "strength" << 4));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kQuaternary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -541,8 +534,7 @@ TEST(CollatorFactoryICUTest, IdenticalStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 5));
+ << "strength" << 5));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kIdentical),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -552,8 +544,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << false));
+ << "numericOrdering" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().numericOrdering);
}
@@ -562,8 +553,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().numericOrdering);
}
@@ -616,8 +606,7 @@ TEST(CollatorFactoryICUTest, NormalizationFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << false));
+ << "normalization" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().normalization);
}
@@ -626,8 +615,7 @@ TEST(CollatorFactoryICUTest, NormalizationTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << true));
+ << "normalization" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().normalization);
}
@@ -636,8 +624,7 @@ TEST(CollatorFactoryICUTest, BackwardsFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << false));
+ << "backwards" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().backwards);
}
@@ -646,8 +633,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true));
+ << "backwards" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().backwards);
}
@@ -656,8 +642,7 @@ TEST(CollatorFactoryICUTest, LongStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1LL));
+ << "strength" << 1LL));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -667,8 +652,7 @@ TEST(CollatorFactoryICUTest, DoubleStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1.0));
+ << "strength" << 1.0));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -688,8 +672,7 @@ TEST(CollatorFactoryICUTest, NonStringCaseFirstFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseFirst"
- << 1));
+ << "caseFirst" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -718,8 +701,7 @@ TEST(CollatorFactoryICUTest, TooLargeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2147483648LL));
+ << "strength" << 2147483648LL));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -728,8 +710,7 @@ TEST(CollatorFactoryICUTest, FractionalStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 0.5));
+ << "strength" << 0.5));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::BadValue);
}
@@ -738,8 +719,7 @@ TEST(CollatorFactoryICUTest, NegativeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << -1));
+ << "strength" << -1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -748,8 +728,7 @@ TEST(CollatorFactoryICUTest, InvalidIntegerStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 6));
+ << "strength" << 6));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -768,8 +747,7 @@ TEST(CollatorFactoryICUTest, NonStringAlternateFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "alternate"
- << 1));
+ << "alternate" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -788,8 +766,7 @@ TEST(CollatorFactoryICUTest, NonStringMaxVariableFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "maxVariable"
- << 1));
+ << "maxVariable" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -846,8 +823,7 @@ TEST(CollatorFactoryICUTest, NonStringVersionFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "version"
- << 3));
+ << "version" << 3));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -879,8 +855,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCollatorIgnoresCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -892,8 +867,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthCollatorsIgnoresCaseButNotAccents)
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -905,8 +879,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthCollatorConsidersCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -918,10 +891,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true));
+ << "strength" << 1 << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -931,14 +901,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 1 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -948,14 +915,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
TEST(CollatorFactoryICUTest, TertiaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 3
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 3 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("A", "a"), 0);
}
@@ -972,8 +936,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("2", "10"), 0);
}
@@ -982,9 +945,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(collator.getValue()->compare("a b", "ab"), 0);
@@ -995,9 +956,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4
- << "alternate"
+ << "strength" << 4 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("a b", "ab"), 0);
@@ -1008,9 +967,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShiftedMaxVariableSpace) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"
<< "maxVariable"
<< "space"));
@@ -1023,8 +980,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsFalse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1035,10 +991,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2
- << "backwards"
- << true));
+ << "strength" << 2 << "backwards" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1069,10 +1022,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthOneFails) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 1));
+ << "backwards" << true << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1080,10 +1030,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthTwoSucceeds) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 2));
+ << "backwards" << true << "strength" << 2));
ASSERT_OK(collator.getStatus());
}
@@ -1093,8 +1040,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1104,8 +1050,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1115,10 +1060,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1128,10 +1070,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1141,8 +1080,7 @@ TEST(CollatorFactoryICUTest, CaseFirstOffWithStrengthOneSucceeds) {
<< "en_US"
<< "caseFirst"
<< "off"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1152,8 +1090,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1163,8 +1100,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1174,8 +1110,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1185,8 +1120,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
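
Note: the hunks above are mechanical reflows of BSON() stream-builder calls; no test semantics change. For orientation, a minimal sketch of the API being reformatted, assembled only from calls that appear in these tests (illustrative, not part of the patch):

CollatorFactoryICU factory;
// Secondary strength ignores case but not accents, as the tests above assert.
auto swCollator = factory.makeFromBSON(BSON("locale"
                                            << "en_US"
                                            << "strength" << 2));
if (swCollator.isOK()) {
    invariant(swCollator.getValue()->compare("a", "A") == 0);
}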
diff --git a/src/mongo/db/query/collation/collator_interface_mock_test.cpp b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
index d792d95c2a1..340e9690ef6 100644
--- a/src/mongo/db/query/collation/collator_interface_mock_test.cpp
+++ b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
@@ -242,10 +242,12 @@ TEST(CollatorInterfaceMockSelfTest, BSONObjsEqualUnderCollatorHashEquallyNested)
SimpleBSONObjComparator bsonCmpConsiderCase;
BSONObjComparator bsonCmpIgnoreCase(
BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, &toLowerCollator);
- BSONObj obj1 = BSON("a" << 1 << "b" << BSON("c"
- << "foo"));
- BSONObj obj2 = BSON("a" << 1 << "b" << BSON("c"
- << "FOO"));
+ BSONObj obj1 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "foo"));
+ BSONObj obj2 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "FOO"));
ASSERT_NE(bsonCmpConsiderCase.hash(obj1), bsonCmpConsiderCase.hash(obj2));
ASSERT_EQ(bsonCmpIgnoreCase.hash(obj1), bsonCmpIgnoreCase.hash(obj2));
}
diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp
index c660bc6adec..b7ea431f678 100644
--- a/src/mongo/db/query/count_command_test.cpp
+++ b/src/mongo/db/query/count_command_test.cpp
@@ -50,8 +50,7 @@ TEST(CountCommandTest, ParserDealsWithMissingFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$lte" << 10)));
+ << "query" << BSON("a" << BSON("$lte" << 10)));
auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
@@ -70,15 +69,8 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
- << 100
- << "skip"
- << 1000
- << "hint"
- << BSON("b" << 5)
- << "collation"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit" << 100 << "skip"
+ << 1000 << "hint" << BSON("b" << 5) << "collation"
<< BSON("locale"
<< "en_US")
<< "readConcern"
@@ -89,8 +81,7 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 10000);
+ << "maxTimeMS" << 10000);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
@@ -110,8 +101,7 @@ TEST(CountCommandTest, ParsingNegativeLimitGivesPositiveLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << -100);
+ << "limit" << -100);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_EQ(countCmd.getLimit().get(), 100);
@@ -122,9 +112,7 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
<< std::numeric_limits<long long>::min());
ASSERT_THROWS_CODE(
@@ -132,31 +120,28 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
}
TEST(CountCommandTest, FailParseBadSkipValue) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "skip"
- << -1000)),
- AssertionException,
- ErrorCodes::FailedToParse);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "skip" << -1000)),
+ AssertionException,
+ ErrorCodes::FailedToParse);
}
TEST(CountCommandTest, FailParseBadCollationType) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "collation"
- << "en_US")),
- AssertionException,
- ErrorCodes::TypeMismatch);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "collation"
+ << "en_US")),
+ AssertionException,
+ ErrorCodes::TypeMismatch);
}
TEST(CountCommandTest, FailParseUnknownField) {
@@ -176,8 +161,7 @@ TEST(CountCommandTest, ConvertToAggregationWithHint) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "hint"
- << BSON("x" << 1));
+ << "hint" << BSON("x" << 1));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -198,12 +182,7 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryAndFilterAndLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << 200
- << "skip"
- << 300
- << "query"
- << BSON("x" << 7));
+ << "limit" << 200 << "skip" << 300 << "query" << BSON("x" << 7));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -227,9 +206,7 @@ TEST(CountCommandTest, ConvertToAggregationWithMaxTimeMS) {
auto countCmd = CountCommand::parse(ctxt,
BSON("count"
<< "TestColl"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "TestDB"));
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
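
Note: the CountCommand tests above parse an IDL-generated command; 'ctxt' is the parser context defined earlier in that test file. A usage sketch under that assumption, mirroring ParsingNegativeLimitGivesPositiveLimit:

auto commandObj = BSON("count"
                       << "TestColl"
                       << "$db"
                       << "TestDB"
                       << "limit" << -100);
// parse() throws AssertionException on malformed input; a negative limit is
// accepted and normalized to its absolute value, per the test above.
auto countCmd = CountCommand::parse(ctxt, commandObj);
invariant(countCmd.getLimit().get() == 100);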
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index 39234429572..8cb8a063e1f 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -169,24 +169,24 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
BSONElement cursorElt = cmdResponse[kCursorField];
if (cursorElt.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
- << cmdResponse};
+ str::stream() << "Field '" << kCursorField
+ << "' must be a nested object in: " << cmdResponse};
}
BSONObj cursorObj = cursorElt.Obj();
BSONElement idElt = cursorObj[kIdField];
if (idElt.type() != BSONType::NumberLong) {
- return {
- ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kIdField << "' must be of type long in: " << cmdResponse};
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kIdField
+ << "' must be of type long in: " << cmdResponse};
}
cursorId = idElt.Long();
BSONElement nsElt = cursorObj[kNsField];
if (nsElt.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kNsField << "' must be of type string in: "
- << cmdResponse};
+ str::stream() << "Field '" << kNsField
+ << "' must be of type string in: " << cmdResponse};
}
fullns = nsElt.String();
@@ -198,9 +198,7 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
if (batchElt.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
- << kBatchField
- << "' in: "
- << cmdResponse};
+ << kBatchField << "' in: " << cmdResponse};
}
batchObj = batchElt.Obj();
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 3ddee43748b..ecb5d7570b6 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -41,13 +41,11 @@ namespace mongo {
namespace {
TEST(CursorResponseTest, parseFromBSONFirstBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -59,13 +57,11 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -77,13 +73,11 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(0) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(0) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -95,13 +89,11 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSONArrayBuilder().arr())
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSONArrayBuilder().arr())
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -125,8 +117,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -134,8 +125,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldWrongType) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -143,10 +133,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -156,10 +144,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldWrongType) {
<< "123"
<< "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -167,19 +153,16 @@ TEST(CursorResponseTest, parseFromBSONBatchFieldMissing) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll")
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONFirstBatchFieldWrongType) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -187,19 +170,16 @@ TEST(CursorResponseTest, parseFromBSONNextBatchFieldWrongType) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ << "nextBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONOkFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
ASSERT_NOT_OK(result.getStatus());
}
@@ -216,13 +196,11 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::InitialResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -230,13 +208,11 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -248,13 +224,11 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
response.addToBSON(CursorResponse::ResponseType::InitialResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -266,13 +240,11 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
response.addToBSON(CursorResponse::ResponseType::SubsequentResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -288,10 +260,8 @@ TEST(CursorResponseTest, serializePostBatchResumeToken) {
<< "db.coll"
<< "nextBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))
- << "postBatchResumeToken"
- << postBatchResumeToken)
- << "ok"
- << 1));
+ << "postBatchResumeToken" << postBatchResumeToken)
+ << "ok" << 1));
auto reparsed = CursorResponse::parseFromBSON(serialized);
ASSERT_OK(reparsed.getStatus());
CursorResponse reparsedResponse = std::move(reparsed.getValue());
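
Note: for reference, the well-formed cursor reply shape these parse tests revolve around, sketched from fields shown above ('firstBatch' appears on initial responses, 'nextBatch' on getMore responses):

auto swResponse = CursorResponse::parseFromBSON(BSON(
    "cursor" << BSON("id" << CursorId(0) << "ns"
                          << "db.coll"
                          << "firstBatch" << BSON_ARRAY(BSON("_id" << 1)))
             << "ok" << 1));
invariant(swResponse.isOK());
CursorResponse response = std::move(swResponse.getValue());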
diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp
index 6ab2a93c976..1705bb600b2 100644
--- a/src/mongo/db/query/datetime/date_time_support.cpp
+++ b/src/mongo/db/query/datetime/date_time_support.cpp
@@ -179,9 +179,7 @@ void TimeZoneDatabase::loadTimeZoneInfo(
40475,
{ErrorCodes::FailedToParse,
str::stream() << "failed to parse time zone file for time zone identifier \""
- << entry.id
- << "\": "
- << timelib_get_error_message(errorCode)});
+ << entry.id << "\": " << timelib_get_error_message(errorCode)});
}
invariant(errorCode == TIMELIB_ERROR_NO_ERROR);
@@ -275,8 +273,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
uasserted(ErrorCodes::ConversionFailure,
str::stream()
<< "an incomplete date/time string has been found, with elements missing: \""
- << dateString
- << "\"");
+ << dateString << "\"");
}
if (!tz.isUtcZone()) {
@@ -294,8 +291,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
ErrorCodes::ConversionFailure,
str::stream()
<< "you cannot pass in a date/time string with time zone information ('"
- << parsedTime.get()->tz_abbr
- << "') together with a timezone argument");
+ << parsedTime.get()->tz_abbr << "') together with a timezone argument");
break;
default: // should technically not be possible to reach
uasserted(ErrorCodes::ConversionFailure,
diff --git a/src/mongo/db/query/datetime/date_time_support.h b/src/mongo/db/query/datetime/date_time_support.h
index 94ac4c4d08e..f5efdcb8fc3 100644
--- a/src/mongo/db/query/datetime/date_time_support.h
+++ b/src/mongo/db/query/datetime/date_time_support.h
@@ -295,8 +295,7 @@ private:
uassert(18537,
str::stream() << "Could not convert date to string: date component was outside "
- << "the supported range of 0-9999: "
- << number,
+ << "the supported range of 0-9999: " << number,
(number >= 0) && (number <= 9999));
int digits = 1;
diff --git a/src/mongo/db/query/datetime/init_timezone_data.cpp b/src/mongo/db/query/datetime/init_timezone_data.cpp
index 970e6db7de4..a9f8fe97ec8 100644
--- a/src/mongo/db/query/datetime/init_timezone_data.cpp
+++ b/src/mongo/db/query/datetime/init_timezone_data.cpp
@@ -50,8 +50,7 @@ ServiceContext::ConstructorActionRegisterer loadTimeZoneDB{
if (!timeZoneDatabase) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "failed to load time zone database from path \""
- << serverGlobalParams.timeZoneInfoPath
- << "\"");
+ << serverGlobalParams.timeZoneInfoPath << "\"");
}
TimeZoneDatabase::set(service,
std::make_unique<TimeZoneDatabase>(std::move(timeZoneDatabase)));
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 8317fc50cfc..e6ad7cc0c5c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -246,4 +246,4 @@ private:
static void generateServerInfo(BSONObjBuilder* out);
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp
index b9c771de18e..581252ffdfc 100644
--- a/src/mongo/db/query/explain_options.cpp
+++ b/src/mongo/db/query/explain_options.cpp
@@ -72,13 +72,10 @@ StatusWith<ExplainOptions::Verbosity> ExplainOptions::parseCmdBSON(const BSONObj
verbosity = Verbosity::kExecStats;
} else if (verbStr != kAllPlansExecutionVerbosityStr) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "verbosity string must be one of {'"
- << kQueryPlannerVerbosityStr
- << "', '"
- << kExecStatsVerbosityStr
- << "', '"
- << kAllPlansExecutionVerbosityStr
- << "'}");
+ str::stream()
+ << "verbosity string must be one of {'" << kQueryPlannerVerbosityStr
+ << "', '" << kExecStatsVerbosityStr << "', '"
+ << kAllPlansExecutionVerbosityStr << "'}");
}
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index f9de0152b5c..97b0640289c 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -337,8 +337,7 @@ Message getMore(OperationContext* opCtx,
// cursor.
uassert(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace " << ns << ", but cursor " << cursorid
- << " belongs to namespace "
- << cursorPin->nss().ns(),
+ << " belongs to namespace " << cursorPin->nss().ns(),
nss == cursorPin->nss());
// A user can only call getMore on their own cursor. If there were multiple users authenticated
diff --git a/src/mongo/db/query/find_and_modify_request.cpp b/src/mongo/db/query/find_and_modify_request.cpp
index 20f62d2a407..9bf40a1f456 100644
--- a/src/mongo/db/query/find_and_modify_request.cpp
+++ b/src/mongo/db/query/find_and_modify_request.cpp
@@ -171,18 +171,18 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto queryElement = cmdObj[kQueryField];
if (queryElement.type() != Object) {
return {ErrorCodes::Error(31160),
- str::stream() << "'" << kQueryField
- << "' parameter must be an object, found "
- << queryElement.type()};
+ str::stream()
+ << "'" << kQueryField << "' parameter must be an object, found "
+ << queryElement.type()};
}
query = queryElement.embeddedObject();
} else if (field == kSortField) {
auto sortElement = cmdObj[kSortField];
if (sortElement.type() != Object) {
return {ErrorCodes::Error(31174),
- str::stream() << "'" << kSortField
- << "' parameter must be an object, found "
- << sortElement.type()};
+ str::stream()
+ << "'" << kSortField << "' parameter must be an object, found "
+ << sortElement.type()};
}
sort = sortElement.embeddedObject();
} else if (field == kRemoveField) {
@@ -195,9 +195,9 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto projectionElement = cmdObj[kFieldProjectionField];
if (projectionElement.type() != Object) {
return {ErrorCodes::Error(31175),
- str::stream() << "'" << kFieldProjectionField
- << "' parameter must be an object, found "
- << projectionElement.type()};
+ str::stream()
+ << "'" << kFieldProjectionField
+ << "' parameter must be an object, found " << projectionElement.type()};
}
fields = projectionElement.embeddedObject();
} else if (field == kUpsertField) {
diff --git a/src/mongo/db/query/find_and_modify_request.h b/src/mongo/db/query/find_and_modify_request.h
index a8b350e691f..a5212570755 100644
--- a/src/mongo/db/query/find_and_modify_request.h
+++ b/src/mongo/db/query/find_and_modify_request.h
@@ -117,13 +117,13 @@ public:
//
/**
- * Sets the filter to find a document.
- */
+ * Sets the filter to find a document.
+ */
void setQuery(BSONObj query);
/**
- * Sets the update object that specifies how a document gets updated.
- */
+ * Sets the update object that specifies how a document gets updated.
+ */
void setUpdateObj(BSONObj updateObj);
/**
@@ -134,8 +134,8 @@ public:
void setShouldReturnNew(bool shouldReturnNew);
/**
- * Sets a flag whether the statement performs an upsert.
- */
+ * Sets a flag whether the statement performs an upsert.
+ */
void setUpsert(bool upsert);
//
@@ -210,4 +210,4 @@ private:
// Holds value when performing an update request and none when a remove request.
boost::optional<write_ops::UpdateModification> _update;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index e0f7041eb37..7b20d39a033 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -631,8 +631,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
bool permitYield,
size_t plannerOptions) {
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- auto yieldPolicy = (permitYield && (readConcernArgs.getLevel() !=
- repl::ReadConcernLevel::kSnapshotReadConcern))
+ auto yieldPolicy =
+ (permitYield &&
+ (readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern))
? PlanExecutor::YIELD_AUTO
: PlanExecutor::INTERRUPT_ONLY;
return _getExecutorFind(
@@ -1370,10 +1371,11 @@ QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx,
const IndexCatalogEntry* ice = ii->next();
const IndexDescriptor* desc = ice->descriptor();
if (desc->keyPattern().hasField(parsedDistinct.getKey())) {
- if (!mayUnwindArrays && isAnyComponentOfPathMultikey(desc->keyPattern(),
- desc->isMultikey(opCtx),
- desc->getMultikeyPaths(opCtx),
- parsedDistinct.getKey())) {
+ if (!mayUnwindArrays &&
+ isAnyComponentOfPathMultikey(desc->keyPattern(),
+ desc->isMultikey(opCtx),
+ desc->getMultikeyPaths(opCtx),
+ parsedDistinct.getKey())) {
// If the caller requested "strict" distinct that does not "pre-unwind" arrays,
// then an index which is multikey on the distinct field may not be used. This is
// because when indexing an array each element gets inserted individually. Any plan
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 6281cedc01a..d54080debef 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -189,14 +189,13 @@ TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesByName) {
- testAllowedIndices(
- {buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
- buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
- // BSONObjSet default constructor is explicit, so we cannot copy-list-initialize until
- // C++14.
- SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
- {"a_1"},
- {"a_1"});
+ testAllowedIndices({buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
+ buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
+ // BSONObjSet default constructor is explicit, so we cannot
+ // copy-list-initialize until C++14.
+ SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
+ {"a_1"},
+ {"a_1"});
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesMultipleIndexesByKey) {
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index e577671f2fd..e78f6e4e37c 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -84,8 +84,7 @@ Status GetMoreRequest::isValid() const {
if (batchSize && *batchSize <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Batch size for getMore must be positive, "
- << "but received: "
- << *batchSize);
+ << "but received: " << *batchSize);
}
return Status::OK();
@@ -116,8 +115,8 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (fieldName == kCollectionField) {
if (el.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'collection' must be of type string in: "
- << cmdObj};
+ str::stream()
+ << "Field 'collection' must be of type string in: " << cmdObj};
}
BSONElement collElt = cmdObj["collection"];
@@ -155,9 +154,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (!isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '"
- << fieldName
- << "'."};
+ << "Unrecognized field '" << fieldName << "'."};
}
}
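
Note: most hunks in this patch reflow the same idiom — composing a Status message with str::stream(). A self-contained sketch of that idiom, mirroring the getMore batch-size check above:

Status validateBatchSize(long long batchSize) {
    if (batchSize <= 0) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Batch size for getMore must be positive, "
                                    << "but received: " << batchSize);
    }
    return Status::OK();
}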
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index f9fe0627cbe..78b235153f8 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -61,8 +61,7 @@ TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
BSON("getMore"
<< "not a number"
- << "collection"
- << 123));
+ << "collection" << 123));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
}
@@ -117,8 +116,7 @@ TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "unknown_field"
- << 1));
+ << "unknown_field" << 1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
}
@@ -128,8 +126,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << -1));
+ << "batchSize" << -1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -139,8 +136,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 0));
+ << "batchSize" << 0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -161,8 +157,7 @@ TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 200));
+ << "batchSize" << 200));
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
ASSERT(result.getValue().batchSize);
@@ -186,8 +181,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMS) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 100));
+ << "maxTimeMS" << 100));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT(result.getValue().awaitDataTimeout);
@@ -200,8 +194,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMSOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 0));
+ << "maxTimeMS" << 0));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
@@ -216,8 +209,7 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99);
+ << "batchSize" << 99);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -240,10 +232,7 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1);
+ << "batchSize" << 99 << "term" << 1);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -255,14 +244,11 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
1,
repl::OpTime(Timestamp(0, 10), 2));
BSONObj requestObj = request.toBSON();
- BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
- << "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1
- << "lastKnownCommittedOpTime"
- << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
+ BSONObj expectedRequest =
+ BSON("getMore" << CursorId(123) << "collection"
+ << "testcoll"
+ << "batchSize" << 99 << "term" << 1 << "lastKnownCommittedOpTime"
+ << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -276,8 +262,7 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
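
Note: a usage sketch for the request parser exercised above, built only from names in these tests:

auto swRequest = GetMoreRequest::parseFromBSON("db",
                                               BSON("getMore" << CursorId(123) << "collection"
                                                              << "coll"
                                                              << "batchSize" << 200));
invariant(swRequest.isOK());
// The namespace resolves to "db.coll"; batchSize is optional and holds 200 here.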
diff --git a/src/mongo/db/query/killcursors_request.cpp b/src/mongo/db/query/killcursors_request.cpp
index df44d73043d..5f21b82d489 100644
--- a/src/mongo/db/query/killcursors_request.cpp
+++ b/src/mongo/db/query/killcursors_request.cpp
@@ -67,8 +67,8 @@ StatusWith<KillCursorsRequest> KillCursorsRequest::parseFromBSON(const std::stri
if (cmdObj[kCursorsField].type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kCursorsField << "' must be of type array in: "
- << cmdObj};
+ str::stream() << "Field '" << kCursorsField
+ << "' must be of type array in: " << cmdObj};
}
std::vector<CursorId> cursorIds;
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index fef544d0b42..d1cdb1f4650 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -95,8 +95,7 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldNotArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << CursorId(123)));
+ << "cursors" << CursorId(123)));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -106,21 +105,18 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldEmptyArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << BSONArrayBuilder().arr()));
+ << "cursors" << BSONArrayBuilder().arr()));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
}
TEST(KillCursorsRequestTest, parseFromBSONCursorFieldContainsEltOfWrongType) {
- StatusWith<KillCursorsRequest> result =
- KillCursorsRequest::parseFromBSON("db",
- BSON("killCursors"
- << "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << "foo"
- << CursorId(456))));
+ StatusWith<KillCursorsRequest> result = KillCursorsRequest::parseFromBSON(
+ "db",
+ BSON("killCursors"
+ << "coll"
+ << "cursors" << BSON_ARRAY(CursorId(123) << "foo" << CursorId(456))));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -132,8 +128,7 @@ TEST(KillCursorsRequestTest, toBSON) {
BSONObj requestObj = request.toBSON();
BSONObj expectedObj = BSON("killCursors"
<< "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << CursorId(456)));
+ << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
ASSERT_BSONOBJ_EQ(requestObj, expectedObj);
}
diff --git a/src/mongo/db/query/killcursors_response.cpp b/src/mongo/db/query/killcursors_response.cpp
index 798b2bf8cb0..8b482772b59 100644
--- a/src/mongo/db/query/killcursors_response.cpp
+++ b/src/mongo/db/query/killcursors_response.cpp
@@ -51,8 +51,8 @@ Status fillOutCursorArray(const BSONObj& cmdResponse,
if (elt.type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << fieldName << "' must be of type array in: "
- << cmdResponse};
+ str::stream() << "Field '" << fieldName
+ << "' must be of type array in: " << cmdResponse};
}
for (BSONElement cursorElt : elt.Obj()) {
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index c0c5da3f278..8f091635bb4 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -41,13 +41,9 @@ namespace {
TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0));
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0));
ASSERT_OK(result.getStatus());
KillCursorsResponse response = result.getValue();
ASSERT_EQ(response.cursorsKilled.size(), 1U);
@@ -65,11 +61,8 @@ TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
TEST(KillCursorsResponseTest, parseFromBSONSuccessOmitCursorsAlive) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsUnknown"
- << BSON_ARRAY(CursorId(789))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsUnknown"
+ << BSON_ARRAY(CursorId(789)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -84,13 +77,11 @@ TEST(KillCursorsResponseTest, parseFromBSONCommandNotOk) {
}
TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
- StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
- BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << "foobar"
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "ok"
- << 1.0));
+ StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(BSON(
+ "cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
+ << "foobar"
+ << "cursorsAlive" << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
+ << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -98,11 +89,8 @@ TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
TEST(KillCursorsResponseTest, parseFromBSONArrayContainsInvalidElement) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -116,13 +104,9 @@ TEST(KillCursorsResponseTest, toBSON) {
BSONObj responseObj = response.toBSON();
BSONObj expectedResponse =
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0);
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
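
Note: per parseFromBSONSuccessOmitCursorsAlive above, omitting 'cursorsAlive' fails to parse, which suggests a reply must carry all four cursor arrays. A minimal well-formed reply, sketched from those fields (empty arrays are accepted, as the 'cursorsUnknown' << BSONArray() case shows):

auto swResp = KillCursorsResponse::parseFromBSON(
    BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound" << BSONArray()
                         << "cursorsAlive" << BSONArray() << "cursorsUnknown" << BSONArray()
                         << "ok" << 1.0));
invariant(swResp.isOK());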
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index ca72257d6d3..226754acba4 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -293,11 +293,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto readConcernElt = cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
if (readConcernElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(readConcernElt.type()));
+ str::stream()
+ << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(readConcernElt.type()));
}
qr->setReadConcern(readConcernElt.embeddedObject());
}
@@ -305,11 +304,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto queryOptionsElt = cmdObj[QueryRequest::kUnwrappedReadPrefField]) {
if (queryOptionsElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << QueryRequest::kUnwrappedReadPrefField
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(queryOptionsElt.type()));
+ str::stream()
+ << "\"" << QueryRequest::kUnwrappedReadPrefField
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(queryOptionsElt.type()));
}
qr->setUnwrappedReadPref(queryOptionsElt.embeddedObject());
}
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index bf48d19439e..dd6e501ed24 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -73,10 +73,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -113,23 +113,21 @@ TEST(ParsedDistinctTest, ConvertToAggregationDottedPathNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y.z"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$match" << BSON("x" << BSON("$_internalSchemaType"
<< "object")
<< "x.y"
<< BSON("$_internalSchemaType"
<< "object"))),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x.y.z")))};
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x.y.z")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -159,9 +157,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "testdb"),
ExtensionsCallbackNoop(),
!isExplain);
@@ -190,10 +186,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -232,10 +228,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
BSON("$match" << BSON("z" << 7)),
BSON("$unwind" << BSON("path"
<< "$y"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$y")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$y")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -269,10 +265,10 @@ TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
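
Note: the pipelines asserted above encode how distinct maps onto aggregation — each path component is $unwind-ed with preserveNullAndEmptyArrays, and values are collected with $addToSet under a null _id. The two-stage skeleton, exactly as these expectations spell it:

std::vector<BSONObj> expectedPipeline{
    BSON("$unwind" << BSON("path"
                           << "$x"
                           << "preserveNullAndEmptyArrays" << true)),
    BSON("$group" << BSON("_id" << BSONNULL << "distinct"
                                << BSON("$addToSet"
                                        << "$x")))};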
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index aaa3bd36f3d..359ad5c23d8 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -34,8 +34,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
/**
* Parses the projection 'spec' and checks its validity with respect to the query 'query'.
@@ -297,9 +297,9 @@ Status ParsedProjection::make(OperationContext* opCtx,
// $meta sortKey should not be checked as a part of _requiredFields, since it can
// potentially produce a covered projection as long as the sort key is covered.
if (BSONType::Object == elt.type()) {
- dassert(
- SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() == BSON("$meta"
- << "sortKey")));
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() ==
+ BSON("$meta"
+ << "sortKey")));
continue;
}
if (elt.trueValue()) {
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index 075858687fd..990b665d6ed 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -38,8 +38,8 @@
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
using namespace mongo;
@@ -62,8 +62,7 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const
Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 9e0d9f717c6..71d1fa456ce 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -92,7 +92,7 @@ bool nodeIsConservativelySupportedBySparseIndex(const MatchExpression* me) {
const bool inElemMatch = false;
return QueryPlannerIXSelect::nodeIsSupportedBySparseIndex(me, inElemMatch);
}
-}
+} // namespace
void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName,
const BSONObj& keyPattern) {
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index d4d91dfe7f9..48116f58416 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -47,8 +47,8 @@ std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index a2ab4e1f475..d5f63f37f24 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -1337,8 +1337,7 @@ TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
TEST_F(CachePlanSelectionTest, AndWithinPolygonWithinCenterSphere) {
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
"a_2dsphere_b_2dsphere");
BSONObj query = fromjson(
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index 7163d69e474..f213e98c6c9 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -41,10 +41,10 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::endl;
using std::set;
using std::string;
+using std::unique_ptr;
using std::vector;
std::string getPathPrefix(std::string path) {
@@ -668,9 +668,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
// multikey information.
invariant(INDEX_2DSPHERE == thisIndex.type);
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is on the leading field of 'thisIndex'. We assign it to
// 'thisIndex' and skip assigning any other predicates on the leading field to
// 'thisIndex' because no additional predicate on the leading field will generate a
@@ -722,9 +722,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
}
} else if (thisIndex.multikey) {
// Special handling for multikey mandatory indices.
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is over the first field of the index. Assign
// it now.
indexAssign.preds.push_back(mandatoryPred);
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 7720824f7f7..3487e955675 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -46,9 +46,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index 84b7616a24f..1e2adddbd3f 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -682,13 +682,14 @@ void QueryPlannerIXSelect::_rateIndices(MatchExpression* node,
const IndexEntry& index = indices[i];
std::size_t keyPatternIndex = 0;
for (auto&& keyPatternElt : index.keyPattern) {
- if (keyPatternElt.fieldNameStringData() == fullPath && _compatible(keyPatternElt,
- index,
- keyPatternIndex,
- node,
- fullPath,
- collator,
- elemMatchCtx)) {
+ if (keyPatternElt.fieldNameStringData() == fullPath &&
+ _compatible(keyPatternElt,
+ index,
+ keyPatternIndex,
+ node,
+ fullPath,
+ collator,
+ elemMatchCtx)) {
if (keyPatternIndex == 0) {
rt->first.push_back(i);
} else {
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index e80eddd187b..e1018a87944 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -51,8 +51,8 @@ namespace {
constexpr CollatorInterface* kSimpleCollator = nullptr;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
/**
@@ -1131,8 +1131,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseHashedIndex) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
auto entry = buildSimpleIndexEntry(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices;
@@ -1143,10 +1142,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseTextIndexSuffix) {
auto entry = buildSimpleIndexEntry(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices = {0};
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 9735dbade0e..e02cae41ec2 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -58,8 +58,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
namespace dps = ::mongo::dotted_path_support;
@@ -520,8 +520,8 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
auto soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
if (!soln) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to analyze plan from cache. Query: "
- << query.toStringShort());
+ str::stream()
+ << "Failed to analyze plan from cache. Query: " << query.toStringShort());
}
LOG(5) << "Planner: solution constructed from the cache:\n" << redact(soln->toString());
@@ -610,11 +610,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
if (fullIndexList.size() > 1) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Hint matched multiple indexes, "
- << "must hint by index name. Matched: "
- << fullIndexList[0].toString()
- << " and "
- << fullIndexList[1].toString());
+ str::stream()
+ << "Hint matched multiple indexes, "
+ << "must hint by index name. Matched: " << fullIndexList[0].toString()
+ << " and " << fullIndexList[1].toString());
}
hintedIndexEntry.emplace(fullIndexList.front());
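Most hunks in this file reflow `str::stream()` chains so the message operands stay together on one line. `str::stream` accumulates via `operator<<` and converts to a string when the message is complete; a simplified stand-in (not the real mongo::str::stream, whose internals may differ) behaves like this:

#include <iostream>
#include <sstream>
#include <string>

// Simplified stand-in for str::stream: accumulate with operator<< and
// convert implicitly to std::string once the full message is assembled.
class StreamSketch {
public:
    template <typename T>
    StreamSketch& operator<<(const T& v) {
        _os << v;
        return *this;
    }
    operator std::string() const {
        return _os.str();
    }

private:
    std::ostringstream _os;
};

int main() {
    std::string first = "{a: 1}", second = "{b: 1}";
    std::string msg = StreamSketch() << "Hint matched multiple indexes, "
                                     << "must hint by index name. Matched: " << first
                                     << " and " << second;
    std::cout << msg << '\n';
    return 0;
}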
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index c70ec258481..b23c40a64fe 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -89,8 +89,7 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
TEST_F(QueryPlannerTest, Basic2DCompound) {
addIndex(BSON("loc"
<< "2d"
- << "a"
- << 1));
+ << "a" << 1));
runQuery(
fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
@@ -247,8 +246,7 @@ TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
TEST_F(QueryPlannerTest, 2DNonNearContainedOr) {
addIndex(BSON("a"
<< "2d"
- << "x"
- << 1));
+ << "x" << 1));
addIndex(BSON("y" << 1));
runQuery(
fromjson("{$and: [{x: 1}, {$or: [{a: {$within: {$polygon: [[0, 0], [0, 1], [1, 0], [0, "
@@ -649,10 +647,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
// true means multikey
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"
- << "c"
- << 1
- << "d"
- << 1),
+ << "c" << 1 << "d" << 1),
true);
runQuery(
fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
@@ -671,8 +666,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
// true means multikey
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
true);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
@@ -1163,10 +1157,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{1U}, {1U}, {1U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1196,10 +1187,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1230,10 +1218,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {b: 2, c: 3}}}"));
@@ -1265,10 +1250,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U, 1U}, {0U, 1U}, {0U, 1U}};
addIndex(BSON("a.b.geo"
<< "2dsphere"
- << "a.b.c"
- << 1
- << "a.b.d"
- << 1),
+ << "a.b.c" << 1 << "a.b.d" << 1),
multikeyPaths);
runQuery(fromjson("{'a.b.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {'b.c': 2, 'b.d': 3}}}"));
@@ -1432,8 +1414,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDNearCompound) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo"
<< "2dsphere"
- << "nongeo"
- << 1)};
+ << "nongeo" << 1)};
BSONObj predicate = fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}");
testMultiple2dsphereIndexVersions(versions, keyPatterns, predicate, 1U);
}
@@ -1444,16 +1425,10 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowOr) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo1"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1),
+ << "a" << 1 << "b" << 1),
BSON("geo2"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1)};
+ << "a" << 1 << "b" << 1)};
BSONObj predicate = fromjson(
"{a: 4, b: 5, $or: ["
@@ -1475,8 +1450,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowElemMatch) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("a.b"
<< "2dsphere"
- << "a.c"
- << 1)};
+ << "a.c" << 1)};
BSONObj predicate = fromjson(
"{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
@@ -1600,8 +1574,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1616,8 +1589,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
@@ -1632,8 +1604,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldHandledCorr
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1647,8 +1618,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
@@ -1661,8 +1631,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
assertNumSolutions(1U);
@@ -1673,8 +1642,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
@@ -1687,8 +1655,7 @@ TEST_F(QueryPlannerTest, 2dGeoWithinWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{a: {$within: {$polygon: [[0,0], [2,0], [4,0]]}}, b: {$_internalExprEq: 2}}"));
@@ -1745,8 +1712,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{b: {$_internalExprEq: 0}, a: {$geoWithin: {$centerSphere: [[0, 0], 10]}}}"));
@@ -1767,8 +1733,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingFieldMulti
const bool multikey = true;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(
@@ -1791,8 +1756,7 @@ TEST_F(QueryPlannerTest, 2dWithinPredicateOverTrailingFieldElemMatchMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}"));
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 552241ae9ea..3e0cf497f86 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -434,7 +434,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseIndex) {
addIndex(BSON("x" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -449,7 +449,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -462,7 +462,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullInElemMatchValueSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{'x': {$elemMatch: {$ne: null}}}"));
@@ -1674,8 +1674,7 @@ TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
addIndex(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
ASSERT_EQUALS(getNumSolutions(), 1U);
@@ -2744,7 +2743,7 @@ TEST_F(QueryPlannerTest, NegationCannotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$ne: 5}}"));
assertHasOnlyCollscan();
@@ -2758,7 +2757,7 @@ TEST_F(QueryPlannerTest, NegationInElemMatchDoesNotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$elemMatch: {$ne: 5}}}"));
assertHasOnlyCollscan();
@@ -2770,7 +2769,7 @@ TEST_F(QueryPlannerTest, SparseIndexCannotSupportEqualsNull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$eq: null}}"));
assertHasOnlyCollscan();
@@ -2784,7 +2783,7 @@ TEST_F(QueryPlannerTest, SparseIndexCanSupportGTEOrLTENull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$gte: null}}"));
assertNumSolutions(1U);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index d96e3e822f0..ff4aef1309e 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -548,8 +548,8 @@ std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index d0b148349ca..ed4b1e45247 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -52,8 +52,7 @@ using namespace mongo;
TEST_F(QueryPlannerTest, SimpleText) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah'}}"));
assertNumSolutions(1);
@@ -65,8 +64,7 @@ TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1}"));
// No table scans allowed so there is no solution.
@@ -79,8 +77,7 @@ TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -99,8 +96,7 @@ TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
@@ -113,8 +109,7 @@ TEST_F(QueryPlannerTest, PrefixOnTextIndexIsOutsidePred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("b" << 1));
runInvalidQuery(fromjson("{$and: [{a: 5}, {$or: [{$text: {$search: 'blah'}}, {b: 6}]}]}"));
}
@@ -124,8 +119,7 @@ TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Both points.
runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
@@ -150,10 +144,7 @@ TEST_F(QueryPlannerTest, SuffixOptional) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -168,10 +159,7 @@ TEST_F(QueryPlannerTest, RemoveFromSubtree) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -187,8 +175,7 @@ TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
true);
// Both points.
@@ -201,10 +188,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
// 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
// index to satisfy it w/o the text query.
@@ -215,10 +199,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -228,8 +209,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
assertNumSolutions(1U);
@@ -242,8 +222,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPr
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -257,8 +236,7 @@ TEST_F(QueryPlannerTest, TextInsideOrBasic) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -274,8 +252,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
"{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
@@ -294,8 +271,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
@@ -316,8 +292,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
addIndex(BSON("b" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{a: 1, $or: [{a:2}, {b:2}, "
"{a: 1, $text: {$search: 'foo'}}]}"));
@@ -336,8 +311,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
@@ -360,8 +334,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(0);
@@ -374,8 +347,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
"{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
@@ -390,8 +362,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
"{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
@@ -405,8 +376,7 @@ TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
TEST_F(QueryPlannerTest, OrTextExact) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
@@ -421,8 +391,7 @@ TEST_F(QueryPlannerTest, OrTextExact) {
TEST_F(QueryPlannerTest, OrTextInexactCovered) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
@@ -437,8 +406,7 @@ TEST_F(QueryPlannerTest, OrTextInexactCovered) {
TEST_F(QueryPlannerTest, TextCaseSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
assertNumSolutions(1);
@@ -448,8 +416,7 @@ TEST_F(QueryPlannerTest, TextCaseSensitive) {
TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $diacriticSensitive: true}}"));
assertNumSolutions(1);
@@ -459,8 +426,7 @@ TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
TEST_F(QueryPlannerTest, SortKeyMetaProjectionWithTextScoreMetaSort) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(fromjson("{$text: {$search: 'foo'}}"),
fromjson("{a: {$meta: 'textScore'}}"),
@@ -477,8 +443,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
const bool multikey = true;
addIndex(BSON("a.x" << 1 << "a.y" << 1 << "b.x" << 1 << "b.y" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{'a.x': 1, 'a.y': 2, 'b.x': 3, 'b.y': 4, $text: {$search: 'foo'}}"));
@@ -491,8 +456,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -504,8 +468,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -517,10 +480,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -533,10 +493,7 @@ TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectlyM
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1),
+ << "_ftsx" << 1 << "b" << 1),
multikey);
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -550,8 +507,7 @@ TEST_F(QueryPlannerTest, ExprEqCannotUsePrefixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
}
@@ -560,10 +516,7 @@ TEST_F(QueryPlannerTest, ExprEqCanUseSuffixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
runQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
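The BSON(...) chains collapsed throughout these tests alternate field names and values inside a single `<<` expression, which is why `"_ftsx" << 1 << "a" << 1` reads as two name/value pairs. A toy analogue of that alternation (illustrative only, not MongoDB's BSONObjBuilder):

#include <iostream>
#include <string>

// Toy key-pattern builder: streamed tokens alternate between field name
// and value, mirroring the shape of the BSON(...) chains above.
class KeyPatternSketch {
public:
    KeyPatternSketch& operator<<(const char* s) {
        return append(std::string("'") + s + "'");
    }
    KeyPatternSketch& operator<<(int v) {
        return append(std::to_string(v));
    }
    std::string str() const {
        return "{" + _body + "}";
    }

private:
    KeyPatternSketch& append(const std::string& tok) {
        if (_expectValue) {
            _body += ": " + tok;
        } else if (!_body.empty()) {
            _body += ", " + tok;
        } else {
            _body += tok;
        }
        _expectValue = !_expectValue;
        return *this;
    }

    std::string _body;
    bool _expectValue = false;
};

int main() {
    KeyPatternSketch kp;
    kp << "_fts" << "text" << "_ftsx" << 1 << "a" << 1;
    std::cout << kp.str() << '\n';  // {'_fts': 'text', '_ftsx': 1, 'a': 1}
    return 0;
}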
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index d0fd0def30e..eba458736af 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -901,8 +901,7 @@ TEST_F(QueryPlannerWildcardTest, WildcardIndexDoesNotSupplyCandidatePlanForTextS
addWildcardIndex(BSON("$**" << 1));
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Confirm that the wildcard index generates candidate plans for queries which do not include a
// $text predicate.
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index 4fc31cd4965..b3c87b40ab8 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -413,9 +413,7 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
} else if (!isGenericArgument(fieldName)) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '"
- << fieldName
- << "'.");
+ << "Unrecognized field '" << fieldName << "'.");
}
}
@@ -663,26 +661,26 @@ Status QueryRequest::validate() const {
if (_limit && *_limit < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Limit value must be non-negative, but received: "
- << *_limit);
+ str::stream()
+ << "Limit value must be non-negative, but received: " << *_limit);
}
if (_batchSize && *_batchSize < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "BatchSize value must be non-negative, but received: "
- << *_batchSize);
+ str::stream()
+ << "BatchSize value must be non-negative, but received: " << *_batchSize);
}
if (_ntoreturn && *_ntoreturn < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "NToReturn value must be non-negative, but received: "
- << *_ntoreturn);
+ str::stream()
+ << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
}
if (_maxTimeMS < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "MaxTimeMS value must be non-negative, but received: "
- << _maxTimeMS);
+ str::stream()
+ << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
}
if (_tailableMode != TailableModeEnum::kNormal) {
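The validate() hunks above only move where `str::stream()` begins; the early-return pattern itself is unchanged. A self-contained sketch of that pattern (StatusSketch is a local stand-in, not mongo::Status):

#include <iostream>
#include <optional>
#include <sstream>
#include <string>

struct StatusSketch {
    bool ok;
    std::string reason;
};

// Each optional numeric field is checked in turn; the first violation
// produces a descriptive error and short-circuits the remaining checks.
StatusSketch validateSketch(std::optional<long long> limit,
                            std::optional<long long> batchSize) {
    if (limit && *limit < 0) {
        std::ostringstream os;
        os << "Limit value must be non-negative, but received: " << *limit;
        return {false, os.str()};
    }
    if (batchSize && *batchSize < 0) {
        std::ostringstream os;
        os << "BatchSize value must be non-negative, but received: " << *batchSize;
        return {false, os.str()};
    }
    return {true, ""};
}

int main() {
    auto s = validateSketch(-5, std::nullopt);
    std::cout << (s.ok ? "OK" : s.reason) << '\n';
    return 0;
}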
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 7ee502140f3..e4f9989b44f 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1571,5 +1571,5 @@ TEST_F(QueryRequestTest, ParseFromUUID) {
ASSERT_EQ(nss, qr.nss());
}
-} // namespace mongo
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/query_settings_test.cpp b/src/mongo/db/query/query_settings_test.cpp
index 41cb1cc0c3d..6a6d0dce66f 100644
--- a/src/mongo/db/query/query_settings_test.cpp
+++ b/src/mongo/db/query/query_settings_test.cpp
@@ -42,9 +42,9 @@
using mongo::AllowedIndicesFilter;
using mongo::BSONObj;
+using mongo::fromjson;
using mongo::IndexEntry;
using mongo::SimpleBSONObjComparator;
-using mongo::fromjson;
namespace {
TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByName) {
@@ -113,4 +113,4 @@ TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByKeyPattern) {
ASSERT_TRUE(filter.allows(a_idx));
ASSERT_FALSE(filter.allows(ab_idx));
}
-}
+} // namespace
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 331e94875dd..ddbe87074a8 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -154,7 +154,7 @@ void addEqualityFieldSorts(const BSONObj& sortPattern,
sortsOut->insert(prefixBob.obj());
}
}
-}
+} // namespace
string QuerySolutionNode::toString() const {
str::stream ss;
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index 5a143a5a5c5..420c9b0efd0 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -728,8 +728,7 @@ auto createMatchExprAndParsedProjection(const BSONObj& query, const BSONObj& pro
ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.getValue().get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return std::make_pair(std::move(queryMatchExpr.getValue()),
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 2e73e2509ef..dcba367378b 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -98,10 +98,9 @@ PlanStage* buildStages(OperationContext* opCtx,
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, ixn->index.identifier.catalogName);
invariant(descriptor,
- str::stream() << "Namespace: " << collection->ns() << ", CanonicalQuery: "
- << cq.toStringShort()
- << ", IndexEntry: "
- << ixn->index.toString());
+ str::stream() << "Namespace: " << collection->ns()
+ << ", CanonicalQuery: " << cq.toStringShort()
+ << ", IndexEntry: " << ixn->index.toString());
// We use the node's internal name, keyPattern and multikey details here. For $**
// indexes, these may differ from the information recorded in the index's descriptor.
diff --git a/src/mongo/db/read_concern.h b/src/mongo/db/read_concern.h
index 7bd7594e143..c9ac7f08e1c 100644
--- a/src/mongo/db/read_concern.h
+++ b/src/mongo/db/read_concern.h
@@ -42,7 +42,7 @@ enum class PrepareConflictBehavior;
namespace repl {
class ReadConcernArgs;
class SpeculativeMajorityReadInfo;
-}
+} // namespace repl
/**
* Given the specified read concern arguments, performs checks that the read concern can actually be
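The `}` to `} // namespace repl` changes here and in the earlier test files match what clang-format's FixNamespaceComments option produces (an assumption about this commit's tooling): every namespace-closing brace gets an annotation naming the namespace it closes. In standalone form:

namespace mongo {
namespace repl {
class ReadConcernArgs;              // forward declarations unchanged by the pass
class SpeculativeMajorityReadInfo;
}  // namespace repl
}  // namespace mongo

int main() {
    return 0;  // compiles as-is; the closing comments are purely annotative
}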
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 7844f28ebea..ea270fef283 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -29,7 +29,6 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-#include "mongo/db/read_concern.h"
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -37,6 +36,7 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/read_concern.h"
#include "mongo/db/read_concern_mongod_gen.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
@@ -168,10 +168,9 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- BSON("appendOplogNote" << 1 << "maxClusterTime" << clusterTime.asTimestamp()
- << "data"
- << BSON("noop write for afterClusterTime read concern"
- << 1)),
+ BSON("appendOplogNote"
+ << 1 << "maxClusterTime" << clusterTime.asTimestamp() << "data"
+ << BSON("noop write for afterClusterTime read concern" << 1)),
Shard::RetryPolicy::kIdempotent);
status = swRes.getStatus();
std::get<1>(myWriteRequest)->set(status);
@@ -295,8 +294,7 @@ MONGO_REGISTER_SHIM(waitForReadConcern)
<< " value must not be greater than the current clusterTime. "
"Requested clusterTime: "
<< targetClusterTime->toString()
- << "; current clusterTime: "
- << currentTime.toString()};
+ << "; current clusterTime: " << currentTime.toString()};
}
auto status = makeNoopWriteIfNeeded(opCtx, *targetClusterTime);
diff --git a/src/mongo/db/read_concern_test.cpp b/src/mongo/db/read_concern_test.cpp
index df078b59aca..50ff8761aeb 100644
--- a/src/mongo/db/read_concern_test.cpp
+++ b/src/mongo/db/read_concern_test.cpp
@@ -48,9 +48,7 @@ using ReadConcernTest = ReplCoordTest;
TEST_F(ReadConcernTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 3ac2f9c6a06..092857b8a81 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -98,10 +98,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
return Status(
ErrorCodes::CannotCreateIndex,
str::stream()
- << "Cannot rebuild index "
- << spec
- << ": "
- << keyStatus.reason()
+ << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
}
}
@@ -126,7 +123,7 @@ Status rebuildIndexesOnCollection(OperationContext* opCtx,
return swRebuild.getStatus();
}
- auto[numRecords, dataSize] = swRebuild.getValue();
+ auto [numRecords, dataSize] = swRebuild.getValue();
auto rs = collection->getRecordStore();
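The one-character change above (`auto[...]` to `auto [...]`) is the spacing newer clang-format applies to C++17 structured bindings. A minimal standalone example of the construct, with an illustrative stand-in for swRebuild.getValue():

#include <iostream>
#include <utility>

// Stand-in for swRebuild.getValue(): a (numRecords, dataSize) pair.
std::pair<long long, long long> rebuildResultSketch() {
    return {42, 4096};
}

int main() {
    // Structured binding unpacks the pair into two named locals.
    auto [numRecords, dataSize] = rebuildResultSketch();
    std::cout << numRecords << " records, " << dataSize << " bytes\n";
    return 0;
}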
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 609db294b31..01c965cb9d3 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -243,9 +243,9 @@ bool hasReplSetConfigDoc(OperationContext* opCtx) {
}
/**
-* Check that the oplog is capped, and abort the process if it is not.
-* Caller must lock DB before calling this function.
-*/
+ * Check that the oplog is capped, and abort the process if it is not.
+ * Caller must lock DB before calling this function.
+ */
void checkForCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
@@ -283,15 +283,13 @@ void rebuildIndexes(OperationContext* opCtx, StorageEngine* storageEngine) {
fassert(40590,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto& indexesToRebuild = swIndexSpecs.getValue();
invariant(indexesToRebuild.first.size() == 1 && indexesToRebuild.second.size() == 1,
str::stream() << "Num Index Names: " << indexesToRebuild.first.size()
- << " Num Index Objects: "
- << indexesToRebuild.second.size());
+ << " Num Index Objects: " << indexesToRebuild.second.size());
auto& ino = nsToIndexNameObjMap[collNss.ns()];
ino.first.emplace_back(std::move(indexesToRebuild.first.back()));
ino.second.emplace_back(std::move(indexesToRebuild.second.back()));
@@ -515,8 +513,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< swVersion.getStatus()
<< "). If the current featureCompatibilityVersion is below "
"4.0, see the documentation on upgrading at "
- << feature_compatibility_version_documentation::kUpgradeLink
- << ".",
+ << feature_compatibility_version_documentation::kUpgradeLink << ".",
swVersion.isOK());
fcvDocumentExists = true;
@@ -535,8 +532,9 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< startupWarningsLog;
log() << "** To fix this, use the setFeatureCompatibilityVersion "
<< "command to resume upgrade to 4.2." << startupWarningsLog;
- } else if (version == ServerGlobalParams::FeatureCompatibility::Version::
- kDowngradingTo40) {
+ } else if (version ==
+ ServerGlobalParams::FeatureCompatibility::Version::
+ kDowngradingTo40) {
log() << "** WARNING: A featureCompatibilityVersion downgrade did not "
<< "complete. " << startupWarningsLog;
log() << "** The current featureCompatibilityVersion is "
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 181f2f5ef69..1b99507fc5c 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -189,16 +189,15 @@ Status AbstractAsyncComponent::_scheduleWorkAtAndSaveHandle_inlock(
const std::string& name) {
invariant(handle);
if (_isShuttingDown_inlock()) {
- return Status(
- ErrorCodes::CallbackCanceled,
- str::stream() << "failed to schedule work " << name << " at " << when.toString() << ": "
- << _componentName
- << " is shutting down");
+ return Status(ErrorCodes::CallbackCanceled,
+ str::stream()
+ << "failed to schedule work " << name << " at " << when.toString() << ": "
+ << _componentName << " is shutting down");
}
auto result = _executor->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 8d5e784b591..64d88ad41e8 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -247,8 +247,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
component.reset();
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to start up " << componentToStartUp << ": "
- << _componentName
- << " is shutting down");
+ << _componentName << " is shutting down");
}
auto status = component->startup();
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
index f3d44242ffb..882cf5f4fa8 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
@@ -135,4 +135,4 @@ executor::RemoteCommandRequest AbstractOplogFetcherTest::processNetworkResponse(
}
} // namespace repl
-} // namespace mango
+} // namespace mongo
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
index 2164f93cac6..7349689bb32 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
@@ -97,4 +97,4 @@ protected:
Date_t lastFetchedWall;
};
} // namespace repl
-} // namespace mango
+} // namespace mongo
diff --git a/src/mongo/db/repl/applier_helpers.cpp b/src/mongo/db/repl/applier_helpers.cpp
index 0113057b57a..82a4cade0ed 100644
--- a/src/mongo/db/repl/applier_helpers.cpp
+++ b/src/mongo/db/repl/applier_helpers.cpp
@@ -196,8 +196,7 @@ StatusWith<InsertGroup::ConstIterator> InsertGroup::groupAndApplyInserts(ConstIt
// application of an individual op.
auto status = exceptionToStatus().withContext(
str::stream() << "Error applying inserts in bulk: " << redact(groupedInsertObj)
- << ". Trying first insert as a lone insert: "
- << redact(entry.getRaw()));
+ << ". Trying first insert as a lone insert: " << redact(entry.getRaw()));
// It's not an error during initial sync to encounter DuplicateKey errors.
if (Mode::kInitialSync == _mode && ErrorCodes::DuplicateKey == status) {
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index df72f0faea8..03e0c8ac566 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -157,9 +157,7 @@ Status _applyOps(OperationContext* opCtx,
ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply insert or update operation on a non-existent namespace "
- << nss.ns()
- << " in atomic applyOps mode: "
- << redact(opObj));
+ << nss.ns() << " in atomic applyOps mode: " << redact(opObj));
}
// Reject malformed operations in an atomic applyOps.
@@ -169,8 +167,7 @@ Status _applyOps(OperationContext* opCtx,
uasserted(ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply a malformed operation in atomic applyOps mode: "
- << redact(opObj)
- << "; will retry without atomicity: "
+ << redact(opObj) << "; will retry without atomicity: "
<< exceptionToStatus().toString());
}
@@ -229,9 +226,7 @@ Status _applyOps(OperationContext* opCtx,
str::stream()
<< "cannot apply insert or update operation on a "
"non-existent namespace "
- << nss.ns()
- << ": "
- << mongo::redact(opObj));
+ << nss.ns() << ": " << mongo::redact(opObj));
}
OldClientContext ctx(opCtx, nss.ns());
diff --git a/src/mongo/db/repl/apply_ops.h b/src/mongo/db/repl/apply_ops.h
index c5cca31569f..8aac61a39b9 100644
--- a/src/mongo/db/repl/apply_ops.h
+++ b/src/mongo/db/repl/apply_ops.h
@@ -116,7 +116,7 @@ Status applyOps(OperationContext* opCtx,
/**
* Applies a non-transactional 'applyOps' oplog entry. That is, an 'applyOps' entry that was not
* generated by a transaction.
-*/
+ */
Status applyApplyOpsOplogEntry(OperationContext* opCtx,
const OplogEntry& entry,
repl::OplogApplication::Mode oplogApplicationMode);
diff --git a/src/mongo/db/repl/apply_ops_test.cpp b/src/mongo/db/repl/apply_ops_test.cpp
index 6416f751a2f..eaa21394384 100644
--- a/src/mongo/db/repl/apply_ops_test.cpp
+++ b/src/mongo/db/repl/apply_ops_test.cpp
@@ -142,17 +142,13 @@ TEST_F(ApplyOpsTest, CommandInNestedApplyOpsReturnsSuccess) {
auto mode = OplogApplication::Mode::kApplyOpsCmd;
BSONObjBuilder resultBuilder;
NamespaceString nss("test", "foo");
- auto innerCmdObj = BSON("op"
- << "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll()));
+ auto innerCmdObj =
+ BSON("op"
+ << "c"
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()));
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -170,18 +166,13 @@ TEST_F(ApplyOpsTest, InsertInNestedApplyOpsReturnsSuccess) {
NamespaceString nss("test", "foo");
auto innerCmdObj = BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
+ << "ns" << nss.ns() << "o"
<< BSON("_id"
<< "a")
- << "ui"
- << options.uuid.get());
+ << "ui" << options.uuid.get());
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -207,18 +198,10 @@ BSONObj makeApplyOpsWithInsertOperation(const NamespaceString& nss,
const BSONObj& documentToInsert) {
auto insertOp = uuid ? BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert
- << "ui"
- << *uuid)
+ << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid)
: BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert);
+ << "ns" << nss.ns() << "o" << documentToInsert);
return BSON("applyOps" << BSON_ARRAY(insertOp));
}
@@ -396,53 +379,35 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
auto ui1 = UUID::gen();
auto op1 = BSON("op"
<< "i"
- << "ns"
- << ns1.ns()
- << "ui"
- << ui1
- << "o"
- << BSON("_id" << 1));
+ << "ns" << ns1.ns() << "ui" << ui1 << "o" << BSON("_id" << 1));
NamespaceString ns2("test.b");
auto ui2 = UUID::gen();
auto op2 = BSON("op"
<< "i"
- << "ns"
- << ns2.ns()
- << "ui"
- << ui2
- << "o"
- << BSON("_id" << 2));
+ << "ns" << ns2.ns() << "ui" << ui2 << "o" << BSON("_id" << 2));
NamespaceString ns3("test.c");
auto ui3 = UUID::gen();
auto op3 = BSON("op"
<< "u"
- << "ns"
- << ns3.ns()
- << "ui"
- << ui3
- << "b"
- << true
- << "o"
- << BSON("x" << 1)
- << "o2"
- << BSON("_id" << 3));
+ << "ns" << ns3.ns() << "ui" << ui3 << "b" << true << "o" << BSON("x" << 1)
+ << "o2" << BSON("_id" << 3));
auto oplogEntry =
makeOplogEntry(OpTypeEnum::kCommand, BSON("applyOps" << BSON_ARRAY(op1 << op2 << op3)));
auto operations = ApplyOps::extractOperations(oplogEntry);
- ASSERT_EQUALS(3U, operations.size()) << "Unexpected number of operations extracted: "
- << oplogEntry.toBSON();
+ ASSERT_EQUALS(3U, operations.size())
+ << "Unexpected number of operations extracted: " << oplogEntry.toBSON();
// Check extracted CRUD operations.
auto it = operations.cbegin();
{
ASSERT(operations.cend() != it);
const auto& operation1 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation1.getOpType()) << "Unexpected op type: "
- << operation1.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation1.getOpType())
+ << "Unexpected op type: " << operation1.toBSON();
ASSERT_EQUALS(ui1, *operation1.getUuid());
ASSERT_EQUALS(ns1, operation1.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), operation1.getOperationToApply());
@@ -454,8 +419,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation2 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation2.getOpType()) << "Unexpected op type: "
- << operation2.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation2.getOpType())
+ << "Unexpected op type: " << operation2.toBSON();
ASSERT_EQUALS(ui2, *operation2.getUuid());
ASSERT_EQUALS(ns2, operation2.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), operation2.getOperationToApply());
@@ -467,8 +432,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation3 = *(it++);
- ASSERT(OpTypeEnum::kUpdate == operation3.getOpType()) << "Unexpected op type: "
- << operation3.toBSON();
+ ASSERT(OpTypeEnum::kUpdate == operation3.getOpType())
+ << "Unexpected op type: " << operation3.toBSON();
ASSERT_EQUALS(ui3, *operation3.getUuid());
ASSERT_EQUALS(ns3, operation3.getNss());
ASSERT_BSONOBJ_EQ(BSON("x" << 1), operation3.getOperationToApply());
@@ -496,9 +461,7 @@ TEST_F(ApplyOpsTest, ApplyOpsFailsToDropAdmin) {
auto dropDatabaseOp = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("dropDatabase" << 1));
auto dropDatabaseCmdObj = BSON("applyOps" << BSON_ARRAY(dropDatabaseOp));
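These test hunks move the streamed diagnostic (`<< "Unexpected op type: " ...`) onto its own line after the assertion. A toy sketch of a streaming assertion built on the do/while(false) idiom, which keeps the macro usable as a single statement (illustrative; not the real mongo unittest macros):

#include <iostream>
#include <sstream>

// TOY_ASSERT_MSG is a hypothetical macro for illustration: on failure it
// evaluates the streamed message expression into a local stream and prints it.
#define TOY_ASSERT_MSG(COND, MSG_EXPR)                                  \
    do {                                                                \
        if (!(COND)) {                                                  \
            std::ostringstream os_;                                     \
            os_ << MSG_EXPR;                                            \
            std::cerr << "assertion failed: " << os_.str() << '\n';     \
        }                                                               \
    } while (false)

int main() {
    int extracted = 2;
    // MSG_EXPR may itself chain << operands, as in the tests above.
    TOY_ASSERT_MSG(extracted == 3,
                   "Unexpected number of operations extracted: " << extracted);
    return 0;
}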
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 7d0dad39e6f..8d9fc63f6cd 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -47,8 +47,7 @@ const HostAndPort BaseClonerTest::target("localhost", -1);
const NamespaceString BaseClonerTest::nss("db.coll");
const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns());
+ << "ns" << nss.ns());
// static
BSONObj BaseClonerTest::createCountResponse(int documentCount) {
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index f6f3dc53d0f..e44427a656d 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -221,17 +221,17 @@ private:
ReplicationProcess* _replicationProcess;
/**
- * All member variables are labeled with one of the following codes indicating the
- * synchronization rules for accessing them:
- *
- * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
- * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
- * thread.
- *
- * (S) Self-synchronizing; access in any way from any context.
- *
- * (M) Reads and writes guarded by _mutex
- *
+ * All member variables are labeled with one of the following codes indicating the
+ * synchronization rules for accessing them:
+ *
+ * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
+ * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
+ * thread.
+ *
+ * (S) Self-synchronizing; access in any way from any context.
+ *
+ * (M) Reads and writes guarded by _mutex
+ *
*/
// Protects member data of BackgroundSync.
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index 7f6f6af9672..8f2dfc40664 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/repl/scatter_gather_algorithm.h"
#include "mongo/db/repl/scatter_gather_runner.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/server_options.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -199,8 +198,8 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
Status hbStatus = hbResp.initialize(resBSON, 0, /*requireWallTime*/ false);
if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
- std::string message = str::stream() << "Our set name did not match that of "
- << request.target.toString();
+ std::string message = str::stream()
+ << "Our set name did not match that of " << request.target.toString();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 4658863cb0c..5bb4fefbc08 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -48,18 +48,18 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
namespace mongo {
@@ -141,30 +141,24 @@ ReplSetConfig assertMakeRSConfig(const BSONObj& configBson) {
}
TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_OK(waitForQuorumCheck());
}
TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
getExecutor().shutdown();
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, waitForQuorumCheck());
}
@@ -173,23 +167,20 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
// In this test, "we" are host "h3:1". All other nodes time out on
// their heartbeat request, and so the quorum check for initiate
// will fail because some members were unavailable.
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1")
- << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
startQuorumCheck(config, 2);
getNet()->enterNetwork();
const Date_t startDate = getNet()->now();
@@ -255,11 +246,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -283,8 +270,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), makeHeartbeatResponse(rsConfig, Milliseconds(8)));
}
@@ -302,19 +289,12 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "h3:1")
<< BSON("_id" << 4 << "host"
@@ -336,8 +316,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
@@ -369,11 +349,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -397,8 +373,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h4", 1)) {
getNet()->scheduleResponse(
noi,
@@ -434,11 +410,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -449,8 +421,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
<< "h4:1")
<< BSON("_id" << 5 << "host"
<< "h5:1"))
- << "settings"
- << BSON("replicaSetId" << replicaSetId)));
+ << "settings" << BSON("replicaSetId" << replicaSetId)));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -467,8 +438,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == incompatibleHost) {
OpTime opTime{Timestamp{10, 10}, 10};
Date_t wallTime = Date_t();
@@ -499,10 +470,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
ASSERT_REASON_CONTAINS(status,
str::stream() << "Our replica set ID of " << replicaSetId
- << " did not match that of "
- << incompatibleHost.toString()
- << ", which is "
- << unexpectedId);
+ << " did not match that of " << incompatibleHost.toString()
+ << ", which is " << unexpectedId);
ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
@@ -518,11 +487,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -546,8 +511,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -582,11 +547,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -610,8 +571,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -642,11 +603,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -666,8 +623,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
long long configVersion = 5;
getNet()->scheduleResponse(
@@ -696,11 +653,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -720,8 +673,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi,
@@ -754,11 +707,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -767,16 +716,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -790,8 +733,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -820,11 +763,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -833,12 +772,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "priority"
- << 0))));
+ << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -852,8 +789,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -878,11 +815,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -891,16 +824,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -914,8 +841,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -938,11 +865,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -962,8 +885,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
getNet()->scheduleResponse(
noi,
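The reflowed BSON chains in the hunks above are layout-only: operator<< chaining builds the same document whether the fields sit one per line or packed onto one. A minimal sketch of the chaining idiom, assuming the in-tree BSON/BSON_ARRAY macros from mongo/bson/bsonmisc.h:

    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobj.h"

    // Both layouts produce byte-identical BSON; only the source formatting differs.
    mongo::BSONObj makeRsConfigSketch() {
        return BSON("_id" << "rs0"
                          << "version" << 1 << "protocolVersion" << 1 << "members"
                          << BSON_ARRAY(BSON("_id" << 1 << "host" << "h1:1")
                                        << BSON("_id" << 2 << "host" << "h2:1")));
    }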
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 393f41f11f2..cc7719d2374 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -76,42 +76,41 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() {
}
Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndexSpecs) {
- return _runTaskReleaseResourcesOnFailure(
- [ coll = _autoColl->getCollection(), &secondaryIndexSpecs, this ]()->Status {
- // All writes in CollectionBulkLoaderImpl should be unreplicated.
- // The opCtx is accessed indirectly through _secondaryIndexesBlock.
- UnreplicatedWritesBlock uwb(_opCtx.get());
- // This enforces the buildIndexes setting in the replica set configuration.
- auto indexCatalog = coll->getIndexCatalog();
- auto specs =
- indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
- if (specs.size()) {
- _secondaryIndexesBlock->ignoreUniqueConstraint();
- auto status =
- _secondaryIndexesBlock
- ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _secondaryIndexesBlock.reset();
+ return _runTaskReleaseResourcesOnFailure([coll = _autoColl->getCollection(),
+ &secondaryIndexSpecs,
+ this]() -> Status {
+ // All writes in CollectionBulkLoaderImpl should be unreplicated.
+ // The opCtx is accessed indirectly through _secondaryIndexesBlock.
+ UnreplicatedWritesBlock uwb(_opCtx.get());
+ // This enforces the buildIndexes setting in the replica set configuration.
+ auto indexCatalog = coll->getIndexCatalog();
+ auto specs = indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
+ if (specs.size()) {
+ _secondaryIndexesBlock->ignoreUniqueConstraint();
+ auto status =
+ _secondaryIndexesBlock
+ ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
- if (!_idIndexSpec.isEmpty()) {
- auto status =
- _idIndexBlock
- ->init(
- _opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _idIndexBlock.reset();
+ } else {
+ _secondaryIndexesBlock.reset();
+ }
+ if (!_idIndexSpec.isEmpty()) {
+ auto status =
+ _idIndexBlock
+ ->init(_opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
+ } else {
+ _idIndexBlock.reset();
+ }
- return Status::OK();
- });
+ return Status::OK();
+ });
}
Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection(
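The bulk-loader hunk above only re-wraps a C++14 init-capture lambda ([coll = _autoColl->getCollection(), &secondaryIndexSpecs, this]). A self-contained sketch of that capture form, with illustrative names only:

    #include <iostream>

    struct Loader {
        int resource = 42;
        void run() {
            // Init-capture copies an expression's result into the closure;
            // 'this' keeps member access live inside the lambda body.
            auto task = [res = resource * 2, this]() -> int {
                return res + resource;  // 84 (captured copy) + 42 (via this)
            };
            std::cout << task() << '\n';  // prints 126
        }
    };

    int main() {
        Loader{}.run();
        return 0;
    }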
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index facbf764d95..84c7273f001 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -132,8 +132,8 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
_sourceNss.db().toString(),
makeCommandWithUUIDorCollectionName("listIndexes", _options.uuid, sourceNss),
[this](const Fetcher::QueryResponseStatus& fetchResult,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listIndexesCallback(fetchResult, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -332,9 +332,7 @@ void CollectionCloner::_countCallback(
_finishCallback(countStatus.withContext(
str::stream() << "There was an error parsing document count from count "
"command result on collection "
- << _sourceNss.ns()
- << " from "
- << _source.toString()));
+ << _sourceNss.ns() << " from " << _source.toString()));
return;
}
}
@@ -343,8 +341,7 @@ void CollectionCloner::_countCallback(
_finishCallback({ErrorCodes::BadValue,
str::stream() << "Count call on collection " << _sourceNss.ns() << " from "
<< _source.toString()
- << " returned negative document count: "
- << count});
+ << " returned negative document count: " << count});
return;
}
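These hunks re-wrap Status::withContext(str::stream() << ...) calls. A toy sketch of the pattern — not the real mongo::Status — showing how human-readable context is prepended while the error code is preserved:

    #include <sstream>
    #include <string>

    struct Status {
        int code;            // 0 == OK
        std::string reason;
        bool isOK() const { return code == 0; }
        // Prepend context; the numeric code is untouched.
        Status withContext(const std::string& prefix) const {
            return Status{code, prefix + " :: " + reason};
        }
    };

    Status addCountContext(const Status& countStatus, const std::string& ns) {
        if (countStatus.isOK())
            return countStatus;
        std::ostringstream ss;
        ss << "error parsing document count on collection " << ns;
        return countStatus.withContext(ss.str());
    }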
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 420457bd703..5fb4970c183 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -260,15 +260,15 @@ void CollectionClonerTest::setUp() {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& nonIdIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- Status result = localLoader->init(nonIdIndexSpecs);
- if (!result.isOK())
- return result;
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ Status result = localLoader->init(nonIdIndexSpecs);
+ if (!result.isOK())
+ return result;
- _loader = localLoader.get();
+ _loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
_server = std::make_unique<MockRemoteDBServer>(target.toString());
_server->assignCollectionUuid(nss.ns(), *options.uuid);
_client = new FailableMockDBClientConnection(_server.get(), getNet());
@@ -282,12 +282,10 @@ void CollectionClonerTest::setUp() {
std::vector<BSONObj> CollectionClonerTest::makeSecondaryIndexSpecs(const NamespaceString& nss) {
return {BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
<< "a_1"
- << "ns"
- << nss.ns()),
+ << "ns" << nss.ns()),
BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
<< "b_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
}
void CollectionClonerTest::tearDown() {
@@ -442,8 +440,7 @@ TEST_F(CollectionClonerTest, CollectionClonerPassesThroughCommandStatusErrorFrom
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "count error"
- << "code"
- << int(ErrorCodes::OperationFailed)));
+ << "code" << int(ErrorCodes::OperationFailed)));
}
collectionCloner->join();
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus());
@@ -565,15 +562,15 @@ TEST_F(CollectionClonerNoAutoIndexTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& theIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
- auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- collNss = theNss;
- collOptions = theOptions;
- collIndexSpecs = theIndexSpecs;
- const auto status = loader->init(theIndexSpecs);
- if (!status.isOK())
- return status;
- return std::move(loader);
- };
+ auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ collNss = theNss;
+ collOptions = theOptions;
+ collIndexSpecs = theIndexSpecs;
+ const auto status = loader->init(theIndexSpecs);
+ if (!status.isOK())
+ return status;
+ return std::move(loader);
+ };
const BSONObj doc = BSON("_id" << 1);
_server->insert(nss.ns(), doc);
@@ -632,13 +629,14 @@ TEST_F(CollectionClonerTest, ListIndexesReturnedNamespaceNotFound) {
bool collectionCreated = false;
bool writesAreReplicatedOnOpCtx = false;
NamespaceString collNss;
- storageInterface->createCollFn = [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
- OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
- writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
- collectionCreated = true;
- collNss = nss;
- return Status::OK();
- };
+ storageInterface->createCollFn =
+ [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
+ OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
+ writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
+ collectionCreated = true;
+ collNss = nss;
+ return Status::OK();
+ };
// Using a non-zero cursor to ensure that
// the cloner stops the fetcher from retrieving more results.
{
@@ -687,9 +685,9 @@ TEST_F(CollectionClonerTest,
// status.
auto exec = &getExecutor();
collectionCloner->setScheduleDbWorkFn_forTest([exec](
- executor::TaskExecutor::CallbackFn workFn) {
+ executor::TaskExecutor::CallbackFn workFn) {
auto wrappedTask = [workFn = std::move(workFn)](
- const executor::TaskExecutor::CallbackArgs& cbd) {
+ const executor::TaskExecutor::CallbackArgs& cbd) {
workFn(executor::TaskExecutor::CallbackArgs(
cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.opCtx));
};
@@ -697,8 +695,9 @@ TEST_F(CollectionClonerTest,
});
bool collectionCreated = false;
- storageInterface->createCollFn = [&collectionCreated](
- OperationContext*, const NamespaceString& nss, const CollectionOptions&) {
+ storageInterface->createCollFn = [&collectionCreated](OperationContext*,
+ const NamespaceString& nss,
+ const CollectionOptions&) {
collectionCreated = true;
return Status::OK();
};
@@ -1401,8 +1400,7 @@ TEST_F(CollectionClonerRenamedBeforeStartTest, BeginCollectionWithUUID) {
BSONObj expectedIdIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << alternateNss.ns());
+ << "ns" << alternateNss.ns());
ASSERT_BSONOBJ_EQ(collIdIndexSpec, expectedIdIndexSpec);
auto expectedNonIdIndexSpecs = makeSecondaryIndexSpecs(alternateNss);
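The re-indented factory lambdas above keep the explicit 'return std::move(localLoader)'. That move is load-bearing under pre-C++20 implicit-move rules: converting a named, move-only unique_ptr into a StatusWith-style wrapper needs the rvalue. A toy sketch (StatusWithSketch is illustrative, not mongo's StatusWith):

    #include <memory>
    #include <utility>

    template <typename T>
    struct StatusWithSketch {
        bool ok;
        T value;
        StatusWithSketch(T v) : ok(true), value(std::move(v)) {}
    };

    struct LoaderMock {};

    StatusWithSketch<std::unique_ptr<LoaderMock>> makeLoader() {
        auto localLoader = std::make_unique<LoaderMock>();
        // init(...) and error paths elided; on success hand the loader over.
        return std::move(localLoader);  // explicit move: unique_ptr is move-only
    }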
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 5991744ccec..a0415918a54 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -117,8 +117,8 @@ DatabaseCloner::DatabaseCloner(executor::TaskExecutor* executor,
_dbname,
createListCollectionsCommandObject(_listCollectionsFilter),
[=](const StatusWith<Fetcher::QueryResponse>& result,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listCollectionsCallback(result, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -263,9 +263,8 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
BSONObjBuilder* getMoreBob) {
if (!result.isOK()) {
_finishCallback(result.getStatus().withContext(
- str::stream() << "Error issuing listCollections on db '" << _dbname << "' (host:"
- << _source.toString()
- << ")"));
+ str::stream() << "Error issuing listCollections on db '" << _dbname
+ << "' (host:" << _source.toString() << ")"));
return;
}
@@ -311,12 +310,11 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
for (auto&& info : _collectionInfos) {
BSONElement nameElement = info.getField(kNameFieldName);
if (nameElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kNameFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kNameFieldName << "' "
+ << "field : " << info});
return;
}
if (nameElement.type() != mongo::String) {
@@ -332,29 +330,24 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
{ErrorCodes::Error(51005),
str::stream()
<< "collection info contains duplicate collection name "
- << "'"
- << collectionName
- << "': "
- << info});
+ << "'" << collectionName << "': " << info});
return;
}
BSONElement optionsElement = info.getField(kOptionsFieldName);
if (optionsElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kOptionsFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kOptionsFieldName << "' "
+ << "field : " << info});
return;
}
if (!optionsElement.isABSONObj()) {
_finishCallback_inlock(lk,
Status(ErrorCodes::TypeMismatch,
str::stream() << "'" << kOptionsFieldName
- << "' field must be an object: "
- << info));
+ << "' field must be an object: " << info));
return;
}
const BSONObj optionsObj = optionsElement.Obj();
@@ -428,8 +421,8 @@ void DatabaseCloner::_collectionClonerCallback(const Status& status, const Names
// Record failure, but do not return just yet, in case we want to do some logging.
if (!status.isOK()) {
- collStatus = status.withContext(
- str::stream() << "Error cloning collection '" << nss.toString() << "'");
+ collStatus = status.withContext(str::stream()
+ << "Error cloning collection '" << nss.toString() << "'");
}
// Forward collection cloner result to caller.
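The pointer-alignment fixes in this file touch the three-argument fetcher callback. A toy sketch of that contract — the callback inspects a batch, may flip *nextAction to stop, and writes the continuation command for the cursor; types are illustrative, not the real Fetcher:

    #include <functional>
    #include <string>
    #include <vector>

    enum class NextAction { kGetMore, kNoAction };

    struct QueryResponse {
        std::vector<std::string> documents;
    };

    using FetchCallback =
        std::function<void(const QueryResponse&, NextAction*, std::string* getMoreCmd)>;

    void runBatches(const std::vector<QueryResponse>& batches, const FetchCallback& cb) {
        for (const auto& batch : batches) {
            NextAction next = NextAction::kGetMore;
            std::string getMoreCmd;
            cb(batch, &next, &getMoreCmd);
            if (next == NextAction::kNoAction)
                break;  // callback asked to stop tailing the cursor
        }
    }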
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index e294c1fae64..74ec6e36711 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -125,16 +125,16 @@ void DatabaseClonerTest::setUp() {
const BSONObj& idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- const auto collInfo = &_collections[nss];
+ const auto collInfo = &_collections[nss];
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
}
void DatabaseClonerTest::tearDown() {
@@ -333,8 +333,7 @@ TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "unknown operator"
- << "code"
- << ErrorCodes::BadValue));
+ << "code" << ErrorCodes::BadValue));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -389,16 +388,13 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()),
+ << "options" << _options2.toBSON()),
BSON("name"
<< "c"
- << "options"
- << _options3.toBSON())};
+ << "options" << _options3.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(
@@ -423,12 +419,10 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
@@ -510,8 +504,7 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< ""
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -532,12 +525,10 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "a"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
}
ASSERT_EQUALS(51005, getStatus().code());
@@ -573,11 +564,11 @@ TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << 123))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << 123))));
}
ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
@@ -594,12 +585,11 @@ TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSON("storageEngine" << 1)))));
+ processNetworkResponse(createListCollectionsResponse(
+ 0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSON("storageEngine" << 1)))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -615,11 +605,11 @@ TEST_F(DatabaseClonerTest, InvalidMissingUUID) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(50953, getStatus().code());
@@ -668,11 +658,11 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -698,8 +688,7 @@ TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
@@ -730,12 +719,10 @@ TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -761,8 +748,7 @@ TEST_F(DatabaseClonerTest, ShutdownCancelsCollectionCloning) {
0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())))));
+ << "options" << _options1.toBSON())))));
net->runReadyNetworkOperations();
// CollectionCloner sends collection count request on startup.
@@ -795,12 +781,10 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
@@ -816,8 +800,7 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
processNetworkResponse(createCountResponse(0));
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "fake message"
- << "code"
- << ErrorCodes::CursorNotFound));
+ << "code" << ErrorCodes::CursorNotFound));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -852,12 +835,10 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index dea8b12cbab..ba75f96c6e4 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -177,19 +177,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
-
- return std::move(localLoader);
- };
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
+
+ return std::move(localLoader);
+ };
_dbWorkThreadPool.startup();
_target = HostAndPort{"local:1234"};
@@ -924,13 +924,13 @@ TEST_F(DBsClonerTest, SingleDatabaseCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -957,13 +957,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}, {name:'b'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options1.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options1.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -974,13 +974,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
<< ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
// listCollections for "b"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "b.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "b"
- << "options"
- << options2.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "b.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "b"
+ << "options" << options2.toBSON()))))},
// count:b
{"count", BSON("n" << 2 << "ok" << 1)},
// listIndexes:b
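The scripted {command, reply} pairs above drive the mock network in order. A stripped-down sketch of that pattern; the real NetworkInterfaceMock is far richer:

    #include <cassert>
    #include <deque>
    #include <string>
    #include <utility>

    struct MockNetSketch {
        std::deque<std::pair<std::string, std::string>> script;  // {cmdName, reply}
        std::string respond(const std::string& cmdName) {
            // Replies are consumed strictly in the order the test scripted them.
            assert(!script.empty() && script.front().first == cmdName);
            auto reply = std::move(script.front().second);
            script.pop_front();
            return reply;
        }
    };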
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index cd546957b8d..289b831b795 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/dbcheck.h"
-#include "mongo/db/repl/dbcheck.h"
#include "mongo/db/repl/dbcheck_gen.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/optime.h"
@@ -132,7 +131,7 @@ std::unique_ptr<HealthLogEntry> dbCheckHealthLogEntry(const NamespaceString& nss
entry->setData(data);
return entry;
}
-}
+} // namespace
/**
* Get an error message if the check fails.
@@ -161,14 +160,9 @@ std::unique_ptr<HealthLogEntry> dbCheckBatchEntry(const NamespaceString& nss,
const repl::OpTime& optime) {
auto hashes = expectedFound(expectedHash, foundHash);
- auto data =
- BSON("success" << true << "count" << count << "bytes" << bytes << "md5" << hashes.second
- << "minKey"
- << minKey.elem()
- << "maxKey"
- << maxKey.elem()
- << "optime"
- << optime);
+ auto data = BSON("success" << true << "count" << count << "bytes" << bytes << "md5"
+ << hashes.second << "minKey" << minKey.elem() << "maxKey"
+ << maxKey.elem() << "optime" << optime);
auto severity = hashes.first ? SeverityEnum::Info : SeverityEnum::Error;
std::string msg =
@@ -284,19 +278,9 @@ std::unique_ptr<HealthLogEntry> dbCheckCollectionEntry(const NamespaceString& ns
std::string msg =
"dbCheck collection " + (match ? std::string("consistent") : std::string("inconsistent"));
auto data = BSON("success" << true << "uuid" << uuid.toString() << "found" << true << "name"
- << names.second
- << "prev"
- << prevs.second
- << "next"
- << nexts.second
- << "indexes"
- << indices.second
- << "options"
- << options.second
- << "md5"
- << md5s.second
- << "optime"
- << optime);
+ << names.second << "prev" << prevs.second << "next" << nexts.second
+ << "indexes" << indices.second << "options" << options.second
+ << "md5" << md5s.second << "optime" << optime);
return dbCheckHealthLogEntry(nss, severity, msg, OplogEntriesEnum::Collection, data);
}
@@ -520,7 +504,7 @@ Status dbCheckDatabaseOnSecondary(OperationContext* opCtx,
return Status::OK();
}
-}
+} // namespace
namespace repl {
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index dde6de369b8..457087a9365 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -228,5 +228,5 @@ Status dbCheckOplogCommand(OperationContext* opCtx,
const repl::OplogEntry& entry,
OplogApplication::Mode mode,
boost::optional<Timestamp> stableTimestampForRecovery);
-}
-}
+} // namespace repl
+} // namespace mongo
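The '} // namespace ...' fixups in these headers follow the convention (automated by clang-format's FixNamespaceComments option) of annotating every namespace-closing brace so long files stay navigable. The resulting shape, in miniature:

    namespace mongo {
    namespace repl {
    class OplogEntry;  // forward declaration, as in the header above
    }  // namespace repl
    }  // namespace mongo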
diff --git a/src/mongo/db/repl/dbcheck_idl.h b/src/mongo/db/repl/dbcheck_idl.h
index c49bff7a5b1..9e2d9c880e6 100644
--- a/src/mongo/db/repl/dbcheck_idl.h
+++ b/src/mongo/db/repl/dbcheck_idl.h
@@ -91,4 +91,4 @@ private:
explicit BSONKey(const BSONElement& elem);
BSONObj _obj;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index bf2df1aa0cf..43fb891728f 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -252,7 +252,7 @@ TEST_F(DropPendingCollectionReaperTest,
decltype(dpns) droppedNss;
bool writesAreReplicatedDuringDrop = true;
storageInterfaceMock.dropCollFn = [&droppedNss, &writesAreReplicatedDuringDrop](
- OperationContext* opCtx, const NamespaceString& nss) {
+ OperationContext* opCtx, const NamespaceString& nss) {
droppedNss = nss;
writesAreReplicatedDuringDrop = opCtx->writesAreReplicated();
return Status::OK();
diff --git a/src/mongo/db/repl/election_reason_counter.h b/src/mongo/db/repl/election_reason_counter.h
index 801d9855a45..89a8dac9b81 100644
--- a/src/mongo/db/repl/election_reason_counter.h
+++ b/src/mongo/db/repl/election_reason_counter.h
@@ -40,8 +40,8 @@ namespace repl {
class ElectionReasonCounter : public ElectionReasonCounterBase {
public:
using ElectionReasonCounterBase::getCalled;
- using ElectionReasonCounterBase::setCalled;
using ElectionReasonCounterBase::getSuccessful;
+ using ElectionReasonCounterBase::setCalled;
using ElectionReasonCounterBase::setSuccessful;
void incrementCalled() {
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index 97b3f86fa2d..8b99c794a90 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -662,12 +662,7 @@ template OplogEntry IdempotencyTest::update<const char*>(char const* _id, const
BSONObj makeInsertApplyOpsEntry(const NamespaceString& nss, const UUID& uuid, const BSONObj& doc) {
return BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc);
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc);
}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 683642a83d3..d4696f7f92a 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -482,8 +482,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
auto status = _checkForShutdownAndConvertStatus_inlock(
callbackArgs,
str::stream() << "error while starting initial sync attempt " << (initialSyncAttempt + 1)
- << " of "
- << initialSyncMaxAttempts);
+ << " of " << initialSyncMaxAttempts);
if (!status.isOK()) {
_finishInitialSyncAttempt(status);
return;
@@ -747,11 +746,8 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one document for the oldest active "
"transaction entry, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
@@ -858,11 +854,8 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one feature compatibility version "
"document, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
const auto hasDoc = docs.begin() != docs.end();
@@ -1518,8 +1511,8 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
}
Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn callback) {
- BSONObj query = BSON(
- "find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
+ BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
+ << "limit" << 1);
_lastOplogEntryFetcher =
std::make_unique<Fetcher>(_exec,
@@ -1670,13 +1663,12 @@ Status InitialSyncer::_scheduleWorkAtAndSaveHandle_inlock(
if (_isShuttingDown_inlock()) {
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to schedule work " << name << " at "
- << when.toString()
- << ": initial syncer is shutting down");
+ << when.toString() << ": initial syncer is shutting down");
}
auto result = _exec->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index ff548abbe55..820c4147fa4 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -295,19 +295,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
_dbWorkThreadPool = std::make_unique<ThreadPool>(ThreadPool::Options());
_dbWorkThreadPool->startup();
@@ -355,17 +355,13 @@ protected:
dataReplicatorExternalState->lastCommittedOpTime = _myLastOpTime;
{
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "myset"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 10000))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "myset"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("electionTimeoutMillis" << 10000))));
dataReplicatorExternalState->replSetConfigResult = config;
}
_externalState = dataReplicatorExternalState.get();
@@ -1153,14 +1149,14 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingOpTimeSchedu
// We reject the 'find' command for the begin fetching optime and save the request for
// inspection at the end of this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (
- ("find" == elem.fieldNameStringData()) &&
- (NamespaceString::kSessionTransactionsTableNamespace.coll() == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ (NamespaceString::kSessionTransactionsTableNamespace.coll() ==
+ elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1243,12 +1239,13 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherSchedul
// We reject the 'find' command on the oplog and save the request for inspection at the end of
// this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (("find" == elem.fieldNameStringData()) && ("oplog.rs" == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ ("oplog.rs" == elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1663,8 +1660,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsUpgradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion42)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -1672,8 +1668,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsDowngradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion40)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -2224,8 +2219,7 @@ TEST_F(InitialSyncerTest,
<< "dbinfo")
<< BSON("name"
<< "b"))
- << "ok"
- << 1)));
+ << "ok" << 1)));
net->runReadyNetworkOperations();
// Oplog tailing query.
@@ -2592,8 +2586,7 @@ TEST_F(
// Second last oplog entry fetcher.
processSuccessfulLastOplogEntryFetcherResponse({BSON("ts"
<< "not a timestamp"
- << "t"
- << 1)});
+ << "t" << 1)});
// _lastOplogEntryFetcherCallbackAfterCloningData() will shut down the OplogFetcher after
// setting the completion status.
@@ -3209,8 +3202,7 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())})));
+ << "ns" << nss.ns())})));
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -3930,8 +3922,7 @@ TEST_F(InitialSyncerTest,
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -4322,8 +4313,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index e160054208b..06e0d1c1896 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -220,8 +220,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Found \"" << kIsReplicaSetFieldName
<< "\" field which should indicate that no valid config "
"is loaded, but we didn't also have an \""
- << kInfoFieldName
- << "\" field as we expected");
+ << kInfoFieldName << "\" field as we expected");
}
}
@@ -248,8 +247,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kHostsFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(hostElement.type()));
}
_hosts.push_back(HostAndPort(hostElement.String()));
@@ -269,8 +267,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kPassivesFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(passiveElement.type()));
}
_passives.push_back(HostAndPort(passiveElement.String()));
@@ -290,8 +287,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kArbitersFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(arbiterElement.type()));
}
_arbiters.push_back(HostAndPort(arbiterElement.String()));
@@ -364,8 +360,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kTagsFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(tagsElement.type()));
}
_tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
@@ -397,8 +392,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastWriteOpTimeElement.type()));
}
auto lastWriteOpTime = OpTime::parseFromOplogEntry(lastWriteOpTimeElement.Obj());
@@ -418,8 +412,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastWriteDateElement.type()));
}
if (_lastWrite) {
@@ -439,8 +432,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastMajorityWriteOpTimeElement.type()));
}
auto lastMajorityWriteOpTime =
@@ -461,8 +453,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastMajorityWriteDateElement.type()));
}
if (_lastMajorityWrite) {
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index 398367e6da3..b50a893efb8 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -273,8 +273,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
for (int tries = 0; tries < 3; ++tries) {
err = GetAdaptersAddresses(family,
GAA_FLAG_SKIP_ANYCAST | // only want unicast addrs
- GAA_FLAG_SKIP_MULTICAST |
- GAA_FLAG_SKIP_DNS_SERVER,
+ GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER,
nullptr,
adapters,
&adaptersLen);
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 02fb978173e..5ab43763c8c 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -178,9 +178,9 @@ MemberConfig::MemberConfig(const BSONObj& mcfg, ReplSetTagConfig* tagConfig) {
for (auto&& tag : tagsElement.Obj()) {
if (tag.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
- str::stream() << "tags." << tag.fieldName()
- << " field has non-string value of type "
- << typeName(tag.type()));
+ str::stream()
+ << "tags." << tag.fieldName()
+ << " field has non-string value of type " << typeName(tag.type()));
}
_tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
}
@@ -240,9 +240,9 @@ Status MemberConfig::validate() const {
}
if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
return Status(ErrorCodes::BadValue,
- str::stream() << kSlaveDelayFieldName << " field value of "
- << durationCount<Seconds>(_slaveDelay)
- << " seconds is out of range");
+ str::stream()
+ << kSlaveDelayFieldName << " field value of "
+ << durationCount<Seconds>(_slaveDelay) << " seconds is out of range");
}
// Check for additional electable requirements, when priority is non zero
if (_priority != 0) {
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index 6176d230463..cf84b37ccdc 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -60,8 +60,7 @@ TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
ReplSetTagConfig tagConfig;
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "localhost"
- << "frim"
- << 1),
+ << "frim" << 1),
&tagConfig),
ExceptionFor<ErrorCodes::BadValue>);
}
@@ -133,8 +132,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << 1.0),
+ << "arbiterOnly" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isArbiter());
ASSERT_EQUALS(0.0, mc.getPriority());
@@ -142,8 +140,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << false),
+ << "arbiterOnly" << false),
&tagConfig);
ASSERT_TRUE(!mc.isArbiter());
ASSERT_EQUALS(1.0, mc.getPriority());
@@ -155,16 +152,14 @@ TEST(MemberConfig, ParseHidden) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << 1.0),
+ << "hidden" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isHidden());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << false),
+ << "hidden" << false),
&tagConfig);
ASSERT_TRUE(!mc.isHidden());
}
@@ -181,16 +176,14 @@ TEST(MemberConfig, ParseBuildIndexes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << 1.0),
+ << "buildIndexes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.shouldBuildIndexes());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << false),
+ << "buildIndexes" << false),
&tagConfig);
ASSERT_TRUE(!mc.shouldBuildIndexes());
}
@@ -201,18 +194,14 @@ TEST(MemberConfig, ParseVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
@@ -220,38 +209,33 @@ TEST(MemberConfig, ParseVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5),
+ << "votes" << 0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5),
+ << "votes" << -0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "votes" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -261,31 +245,27 @@ TEST(MemberConfig, ParsePriority) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1),
+ << "priority" << 1),
&tagConfig);
ASSERT_EQUALS(1.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_EQUALS(0.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 100.8),
+ << "priority" << 100.8),
&tagConfig);
ASSERT_EQUALS(100.8, mc.getPriority());
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "priority" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -294,8 +274,7 @@ TEST(MemberConfig, ParseSlaveDelay) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "slaveDelay"
- << 100),
+ << "slaveDelay" << 100),
&tagConfig);
ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
}
@@ -365,14 +344,13 @@ TEST(MemberConfig, DuplicateHorizonNames) {
ASSERT_NOT_EQUALS(s.reason().find("Duplicate horizon name found"), std::string::npos);
}
try {
- MemberConfig(BSON("_id" << 0 << "host"
- << "h"
- << "horizons"
- << BSON("someUniqueHorizonName"
- << "a.host:43"
- << SplitHorizon::kDefaultHorizon
- << "b.host:256")),
- &tagConfig);
+ MemberConfig(
+ BSON("_id" << 0 << "host"
+ << "h"
+ << "horizons"
+ << BSON("someUniqueHorizonName"
+ << "a.host:43" << SplitHorizon::kDefaultHorizon << "b.host:256")),
+ &tagConfig);
ASSERT_TRUE(false); // Should not succeed.
} catch (const ExceptionFor<ErrorCodes::BadValue>& ex) {
const Status& s = ex.toStatus();
@@ -489,8 +467,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -498,10 +475,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -510,8 +484,7 @@ TEST(MemberConfig, ValidateVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -519,10 +492,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5
- << "priority"
- << 0),
+ << "votes" << 0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -530,10 +500,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5
- << "priority"
- << 0),
+ << "votes" << -0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -542,16 +509,14 @@ TEST(MemberConfig, ValidateVotes) {
// Invalid values
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -1),
+ << "votes" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -562,32 +527,28 @@ TEST(MemberConfig, ValidatePriorityRanges) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1000),
+ << "priority" << 1000),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << -1),
+ << "priority" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1001),
+ << "priority" << 1001),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -598,40 +559,28 @@ TEST(MemberConfig, ValidateSlaveDelays) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 0),
+ << "priority" << 0 << "slaveDelay" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 10),
+ << "priority" << 0 << "slaveDelay" << 3600 * 10),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << -1),
+ << "priority" << 0 << "slaveDelay" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 24 * 400),
+ << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -641,10 +590,7 @@ TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "slaveDelay"
- << 60),
+ << "priority" << 1 << "slaveDelay" << 60),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -654,20 +600,14 @@ TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << true),
+ << "priority" << 1 << "hidden" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << false),
+ << "priority" << 1 << "hidden" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -678,10 +618,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << false),
+ << "priority" << 1 << "buildIndexes" << false),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
@@ -689,10 +626,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << true),
+ << "priority" << 1 << "buildIndexes" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -703,42 +637,28 @@ TEST(MemberConfig, ValidateArbiterVotesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << true),
+ << "votes" << 1 << "arbiterOnly" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0
- << "arbiterOnly"
- << false),
+ << "votes" << 0 << "priority" << 0 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << false),
+ << "votes" << 1 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "arbiterOnly"
- << true),
+ << "votes" << 0 << "arbiterOnly" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp
index 40a081ba6a2..247167bc150 100644
--- a/src/mongo/db/repl/member_data.cpp
+++ b/src/mongo/db/repl/member_data.cpp
@@ -141,8 +141,9 @@ void MemberData::setLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_
// TODO(russotto): We think this should never happen, rollback or no rollback. Make this an
// invariant and see what happens.
log() << "Durable progress (" << opTime.opTime << ") is ahead of the applied progress ("
- << _lastAppliedOpTime << ". This is likely due to a "
- "rollback."
+ << _lastAppliedOpTime
+ << ". This is likely due to a "
+ "rollback."
<< " memberid: " << _memberId << _hostAndPort.toString()
<< " previous durable progress: " << _lastDurableOpTime;
} else {
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.h b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
index 9bac2e16d74..7f52f4a3f21 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.h
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
@@ -39,7 +39,7 @@ class OperationContext;
namespace repl {
class OplogEntry;
class StorageInterfaceMock;
-}
+} // namespace repl
/**
* This is a basic fixture that is backed by an ephemeral storage engine and a mock replication
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 097029946be..5757ddfc267 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -342,8 +342,7 @@ void _logOpsInner(OperationContext* opCtx,
// are logging within one WriteUnitOfWork.
invariant(finalOpTime.getTimestamp() <= *commitTime,
str::stream() << "Final OpTime: " << finalOpTime.toString()
- << ". Commit Time: "
- << commitTime->toString());
+ << ". Commit Time: " << commitTime->toString());
}
// Optionally hang before advancing lastApplied.
@@ -397,7 +396,7 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) {
// again. For example, if the WUOW gets aborted within a writeConflictRetry loop, we need to
// reset the OpTime to null so a new OpTime will be assigned on retry.
OplogSlot slot = oplogEntry->getOpTime();
- auto resetOpTimeGuard = makeGuard([&, resetOpTimeOnExit = bool(slot.isNull()) ] {
+ auto resetOpTimeGuard = makeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
if (resetOpTimeOnExit)
oplogEntry->setOpTime(OplogSlot());
});
@@ -540,7 +539,7 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
LOG(3) << "32bit system; choosing " << sz << " bytes oplog";
return sz;
}
-// First choose a minimum size.
+ // First choose a minimum size.
#if defined(__APPLE__)
// typically these are desktops (dev machines), so keep it smallish
@@ -670,8 +669,7 @@ std::pair<OptionalCollectionUUID, NamespaceString> parseCollModUUIDAndNss(Operat
const auto nsByUUID = catalog.lookupNSSByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(cmd.toString()),
+ << "): " << redact(cmd.toString()),
nsByUUID);
return std::pair<OptionalCollectionUUID, NamespaceString>(uuid, *nsByUUID);
}
@@ -1225,8 +1223,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
collection = catalog.lookupCollectionByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(op.toString()),
+ << "): " << redact(op.toString()),
collection);
requestNss = collection->ns();
dassert(opCtx->lockState()->isCollectionLockedForMode(
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 9cdc69823a8..642a1db0078 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -66,17 +66,16 @@ std::tuple<BSONObj, Timestamp, std::size_t> OplogBufferCollection::addIdToDocume
const BSONObj& orig, const Timestamp& lastTimestamp, std::size_t sentinelCount) {
if (orig.isEmpty()) {
return std::make_tuple(
- BSON(kIdFieldName << BSON(
- kTimestampFieldName << lastTimestamp << kSentinelFieldName
- << static_cast<long long>(sentinelCount + 1))),
+ BSON(kIdFieldName << BSON(kTimestampFieldName
+ << lastTimestamp << kSentinelFieldName
+ << static_cast<long long>(sentinelCount + 1))),
lastTimestamp,
sentinelCount + 1);
}
const auto ts = orig[kTimestampFieldName].timestamp();
invariant(!ts.isNull());
auto doc = BSON(kIdFieldName << BSON(kTimestampFieldName << ts << kSentinelFieldName << 0)
- << kOplogEntryFieldName
- << orig);
+ << kOplogEntryFieldName << orig);
return std::make_tuple(doc, ts, 0);
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index d50f214dfb9..ca8582fc754 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -118,12 +118,9 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") {
BSONObj makeOplogEntry(int t) {
return BSON("ts" << Timestamp(t, t) << "ns"
<< "a.a"
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "o"
- << BSON("_id" << t << "a" << t));
+ << "o" << BSON("_id" << t << "a" << t));
}
TEST_F(OplogBufferCollectionTest, DefaultNamespace) {
@@ -603,7 +600,9 @@ TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.push(_opCtx.get(), oplog.begin(), oplog.end());
@@ -646,7 +645,9 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.push(_opCtx.get(), oplog.begin(), oplog.end());
@@ -682,7 +683,9 @@ TEST_F(OplogBufferCollectionTest,
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(3), makeOplogEntry(4), makeOplogEntry(5),
+ makeOplogEntry(3),
+ makeOplogEntry(4),
+ makeOplogEntry(5),
};
ASSERT_BSONOBJ_EQ(*oplogBuffer.lastObjectPushed(_opCtx.get()), secondDoc);
@@ -907,7 +910,12 @@ void _testPushSentinelsProperly(OperationContext* opCtx,
OplogBufferCollection oplogBuffer(storageInterface, nss);
oplogBuffer.startup(opCtx);
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
pushDocsFn(opCtx, &oplogBuffer, oplog);
@@ -937,7 +945,8 @@ DEATH_TEST_F(
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(2), makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(1),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.push(_opCtx.get(), oplog.begin(), oplog.end());
@@ -949,7 +958,10 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), BSONObj(), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ BSONObj(),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.push(_opCtx.get(), oplog.cbegin(), oplog.cend());
@@ -1066,7 +1078,12 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.push(_opCtx.get(), oplog.cbegin(), oplog.cend());
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 57a8eb033ba..f41558f2e4c 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -81,8 +81,7 @@ OplogEntry::CommandType parseCommandType(const BSONObj& objectField) {
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unknown oplog entry command type: " << commandString
- << " Object field: "
- << redact(objectField));
+ << " Object field: " << redact(objectField));
}
MONGO_UNREACHABLE;
}
@@ -202,7 +201,7 @@ StatusWith<MutableOplogEntry> MutableOplogEntry::parse(const BSONObj& object) {
MONGO_UNREACHABLE;
}
-void MutableOplogEntry::setOpTime(const OpTime& opTime)& {
+void MutableOplogEntry::setOpTime(const OpTime& opTime) & {
setTimestamp(opTime.getTimestamp());
if (opTime.getTerm() != OpTime::kUninitializedTerm)
setTerm(opTime.getTerm());
diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h
index 7d98aee00f0..24c47432508 100644
--- a/src/mongo/db/repl/oplog_entry.h
+++ b/src/mongo/db/repl/oplog_entry.h
@@ -147,14 +147,15 @@ public:
class OplogEntry : private MutableOplogEntry {
public:
// Make field names accessible.
- using MutableOplogEntry::kDurableReplOperationFieldName;
- using MutableOplogEntry::kOperationSessionInfoFieldName;
using MutableOplogEntry::k_idFieldName;
+ using MutableOplogEntry::kDurableReplOperationFieldName;
using MutableOplogEntry::kFromMigrateFieldName;
using MutableOplogEntry::kHashFieldName;
using MutableOplogEntry::kNssFieldName;
- using MutableOplogEntry::kObjectFieldName;
using MutableOplogEntry::kObject2FieldName;
+ using MutableOplogEntry::kObjectFieldName;
+ using MutableOplogEntry::kOperationSessionInfoFieldName;
+ using MutableOplogEntry::kOplogVersion;
using MutableOplogEntry::kOpTypeFieldName;
using MutableOplogEntry::kPostImageOpTimeFieldName;
using MutableOplogEntry::kPreImageOpTimeFieldName;
@@ -168,38 +169,37 @@ public:
using MutableOplogEntry::kUuidFieldName;
using MutableOplogEntry::kVersionFieldName;
using MutableOplogEntry::kWallClockTimeFieldName;
- using MutableOplogEntry::kOplogVersion;
// Make serialize(), toBSON() and getters accessible.
- using MutableOplogEntry::serialize;
- using MutableOplogEntry::toBSON;
- using MutableOplogEntry::getOperationSessionInfo;
- using MutableOplogEntry::getSessionId;
- using MutableOplogEntry::getTxnNumber;
+ using MutableOplogEntry::get_id;
using MutableOplogEntry::getDurableReplOperation;
- using MutableOplogEntry::getOpType;
+ using MutableOplogEntry::getFromMigrate;
+ using MutableOplogEntry::getHash;
using MutableOplogEntry::getNss;
- using MutableOplogEntry::getUuid;
using MutableOplogEntry::getObject;
using MutableOplogEntry::getObject2;
- using MutableOplogEntry::getUpsert;
- using MutableOplogEntry::getTimestamp;
+ using MutableOplogEntry::getOperationSessionInfo;
+ using MutableOplogEntry::getOpType;
+ using MutableOplogEntry::getPostImageOpTime;
+ using MutableOplogEntry::getPreImageOpTime;
+ using MutableOplogEntry::getPrevWriteOpTimeInTransaction;
+ using MutableOplogEntry::getSessionId;
+ using MutableOplogEntry::getStatementId;
using MutableOplogEntry::getTerm;
- using MutableOplogEntry::getHash;
+ using MutableOplogEntry::getTimestamp;
+ using MutableOplogEntry::getTxnNumber;
+ using MutableOplogEntry::getUpsert;
+ using MutableOplogEntry::getUuid;
using MutableOplogEntry::getVersion;
- using MutableOplogEntry::getFromMigrate;
- using MutableOplogEntry::get_id;
using MutableOplogEntry::getWallClockTime;
- using MutableOplogEntry::getStatementId;
- using MutableOplogEntry::getPrevWriteOpTimeInTransaction;
- using MutableOplogEntry::getPreImageOpTime;
- using MutableOplogEntry::getPostImageOpTime;
+ using MutableOplogEntry::serialize;
+ using MutableOplogEntry::toBSON;
// Make helper functions accessible.
using MutableOplogEntry::getOpTime;
+ using MutableOplogEntry::makeDeleteOperation;
using MutableOplogEntry::makeInsertOperation;
using MutableOplogEntry::makeUpdateOperation;
- using MutableOplogEntry::makeDeleteOperation;
enum class CommandType {
kNotCommand,
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 9c4df1a1bec..3162319ab2b 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -161,11 +161,10 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
// sync source is now behind us, choose a new sync source to prevent going into rollback.
if (remoteLastOpApplied && (*remoteLastOpApplied < lastFetched)) {
return Status(ErrorCodes::InvalidSyncSource,
- str::stream() << "Sync source's last applied OpTime "
- << remoteLastOpApplied->toString()
- << " is older than our last fetched OpTime "
- << lastFetched.toString()
- << ". Choosing new sync source.");
+ str::stream()
+ << "Sync source's last applied OpTime " << remoteLastOpApplied->toString()
+ << " is older than our last fetched OpTime " << lastFetched.toString()
+ << ". Choosing new sync source.");
}
// If 'requireFresherSyncSource' is true, we must check that the sync source's
@@ -181,8 +180,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidSyncSource,
str::stream()
<< "Sync source must be ahead of me. My last fetched oplog optime: "
- << lastFetched.toString()
- << ", latest oplog optime of sync source: "
+ << lastFetched.toString() << ", latest oplog optime of sync source: "
<< remoteLastOpApplied->toString());
}
@@ -202,9 +200,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidBSON,
str::stream() << "our last optime fetched: " << lastFetched.toString()
<< ". failed to parse optime from first oplog on source: "
- << o.toString()
- << ": "
- << opTimeResult.getStatus().toString());
+ << o.toString() << ": " << opTimeResult.getStatus().toString());
}
auto opTime = opTimeResult.getValue();
if (opTime != lastFetched) {
@@ -289,15 +285,9 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (lastTS >= docTS) {
return Status(ErrorCodes::OplogOutOfOrder,
str::stream() << "Out of order entries in oplog. lastTS: "
- << lastTS.toString()
- << " outOfOrderTS:"
- << docTS.toString()
- << " in batch with "
- << info.networkDocumentCount
- << "docs; first-batch:"
- << first
- << ", doc:"
- << doc);
+ << lastTS.toString() << " outOfOrderTS:" << docTS.toString()
+ << " in batch with " << info.networkDocumentCount
+ << "docs; first-batch:" << first << ", doc:" << doc);
}
lastTS = docTS;
}
diff --git a/src/mongo/db/repl/oplog_interface_mock.cpp b/src/mongo/db/repl/oplog_interface_mock.cpp
index 6352fa7566a..95930bf6d62 100644
--- a/src/mongo/db/repl/oplog_interface_mock.cpp
+++ b/src/mongo/db/repl/oplog_interface_mock.cpp
@@ -90,8 +90,7 @@ public:
str::stream()
<< "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << _nextOpTime.toBSON()
- << " cannot be found");
+ << _nextOpTime.toBSON() << " cannot be found");
}
// We shouldn't get any other error.
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index 870cd21980e..a39208720ce 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -118,9 +118,9 @@ TEST_F(OplogTest, LogOpReturnsOpTimeOnSuccessfulInsertIntoOplogCollection) {
<< "OpTime returned from logOp() did not match that in the oplog entry written to the "
"oplog: "
<< oplogEntry.toBSON();
- ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType()) << "Expected 'n' op type but found '"
- << OpType_serializer(oplogEntry.getOpType())
- << "' instead: " << oplogEntry.toBSON();
+ ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType())
+ << "Expected 'n' op type but found '" << OpType_serializer(oplogEntry.getOpType())
+ << "' instead: " << oplogEntry.toBSON();
ASSERT_BSONOBJ_EQ(msgObj, oplogEntry.getObject());
// Ensure that the msg optime returned is the same as the last optime in the ReplClientInfo.
diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp
index d1e2b0d7e49..9192738a31c 100644
--- a/src/mongo/db/repl/optime_extract_test.cpp
+++ b/src/mongo/db/repl/optime_extract_test.cpp
@@ -51,8 +51,7 @@ TEST(ExtractBSON, ExtractOpTimeField) {
// Missing timestamp field.
obj = BSON("a" << BSON("ts"
<< "notATimestamp"
- << "t"
- << 2));
+ << "t" << 2));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, bsonExtractOpTimeField(obj, "a", &opTime));
// Wrong typed timestamp field.
obj = BSON("a" << BSON("t" << 2));
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 89545a0fb4a..5ec5ae968c1 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -202,23 +202,20 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
} else {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized option in " << kReadConcernFieldName
- << ": "
- << fieldName);
+ << ": " << fieldName);
}
}
if (_afterClusterTime && _opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAfterOpTimeFieldName);
+ << " and " << kAfterOpTimeFieldName);
}
if (_afterClusterTime && _atClusterTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAtClusterTimeFieldName);
+ << " and " << kAtClusterTimeFieldName);
}
// Note: 'available' should not be used with after cluster time, as cluster time can wait for
@@ -228,30 +225,24 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
getLevel() != ReadConcernLevel::kLocalReadConcern &&
getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kMajorityReadConcernStr
- << ", "
- << kLocalReadConcernStr
- << ", or "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kMajorityReadConcernStr << ", "
+ << kLocalReadConcernStr << ", or " << kSnapshotReadConcernStr);
}
if (_opTime && getLevel() == ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterOpTimeFieldName << " field cannot be set if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterOpTimeFieldName << " field cannot be set if " << kLevelFieldName
+ << " is equal to " << kSnapshotReadConcernStr);
}
if (_atClusterTime && getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAtClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAtClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kSnapshotReadConcernStr);
}
if (_afterClusterTime && _afterClusterTime == LogicalTime::kUninitialized) {
@@ -294,8 +285,7 @@ Status ReadConcernArgs::upconvertReadConcernLevelToSnapshot() {
if (_opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Cannot upconvert the readConcern level to 'snapshot' when '"
- << kAfterOpTimeFieldName
- << "' is provided");
+ << kAfterOpTimeFieldName << "' is provided");
}
_originalLevel = _level;
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index ed6ec48875c..d6907a31f26 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -39,13 +39,12 @@ namespace {
TEST(ReadAfterParse, OpTimeOnly) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT_TRUE(readConcern.getArgsOpTime());
ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
@@ -59,8 +58,7 @@ TEST(ReadAfterParse, AfterClusterTimeOnly) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
@@ -73,13 +71,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -91,13 +88,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -108,13 +104,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
TEST(ReadAfterParse, AfterClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -127,8 +122,7 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()))));
}
@@ -136,13 +130,12 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAtClusterTime = readConcern.getArgsAtClusterTime();
ASSERT_TRUE(argsAtClusterTime);
ASSERT_FALSE(readConcern.getArgsOpTime());
@@ -153,40 +146,37 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
TEST(ReadAfterParse, AtClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelAvailable) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "available"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "available"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
@@ -194,8 +184,7 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()
<< ReadConcernArgs::kLevelFieldName
@@ -206,8 +195,7 @@ TEST(ReadAfterParse, LevelMajorityOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -219,8 +207,7 @@ TEST(ReadAfterParse, LevelSnapshotOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -234,15 +221,12 @@ TEST(ReadAfterParse, ReadCommittedFullSpecification) {
auto afterClusterTime = LogicalTime(Timestamp(100, 200));
ASSERT_NOT_OK(readConcern.initialize(BSON(
"find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2)
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ << ReadConcernArgs::kAfterClusterTimeFieldName << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, Empty) {
@@ -257,58 +241,51 @@ TEST(ReadAfterParse, Empty) {
TEST(ReadAfterParse, BadRootType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ASSERT_NOT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName << "x")));
}
TEST(ReadAfterParse, BadAtClusterTimeType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << 2
- << ReadConcernArgs::kLevelFieldName
+ << 2 << ReadConcernArgs::kLevelFieldName
<< "snapshot"))));
}
TEST(ReadAfterParse, BadAtClusterTimeValue) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << LogicalTime::kUninitialized.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << LogicalTime::kUninitialized.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, BadOpTimeType) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSONObj())));
}
TEST(ReadAfterParse, NoOpTimeTS) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << 2)))));
}
@@ -316,40 +293,36 @@ TEST(ReadAfterParse, NoOpTimeTS) {
TEST(ReadAfterParse, NoOpTimeTerm) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(
- BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << BSON("x" << 1) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << BSON("x" << 1)
+ << OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTermType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(1, 0) << OpTime::kTermFieldName
- << "y")))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(1, 0)
+ << OpTime::kTermFieldName << "y")))));
}
TEST(ReadAfterParse, BadLevelType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
@@ -357,8 +330,7 @@ TEST(ReadAfterParse, BadLevelValue) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::FailedToParse,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "seven is not a real level"))));
}
@@ -367,39 +339,35 @@ TEST(ReadAfterParse, BadOption) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON("asdf" << 1))));
}
TEST(ReadAfterParse, AtClusterTimeAndAfterClusterTime) {
ReadConcernArgs readConcern;
auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kAfterClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, AfterOpTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30)
- << OpTime::kTermFieldName
- << 2)
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterSerialize, Empty) {
@@ -430,10 +398,10 @@ TEST(ReadAfterSerialize, AfterOpTimeOnly) {
ReadConcernArgs readConcern(OpTime(Timestamp(20, 30), 2), boost::none);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName << BSON(
- ReadConcernArgs::kAfterOpTimeFieldName << BSON(
- OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName << BSON(
+ OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -455,11 +423,10 @@ TEST(ReadAfterSerialize, iAfterCLusterTimeAndLevel) {
ReadConcernArgs readConcern(afterClusterTime, ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "majority"
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -470,13 +437,11 @@ TEST(ReadAfterSerialize, AfterOpTimeAndLevel) {
ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName
- << "majority"
- << ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName << BSON(
+ ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -486,8 +451,7 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -495,11 +459,10 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"
- << ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "snapshot" << ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -516,8 +479,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, EmptyLevel) {
TEST(UpconvertReadConcernLevelToSnapshot, LevelLocal) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "local"))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -530,8 +492,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelMajority) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT(ReadConcernLevel::kMajorityReadConcern == readConcern.getLevel());
@@ -544,8 +505,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshot) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
@@ -558,8 +518,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshotWithAtClusterTime) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -577,8 +536,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, AfterClusterTime) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -594,8 +552,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelAvailable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "available"))));
ASSERT(ReadConcernLevel::kAvailableReadConcern == readConcern.getLevel());
@@ -608,8 +565,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "linearizable"))));
ASSERT(ReadConcernLevel::kLinearizableReadConcern == readConcern.getLevel());
@@ -620,13 +576,12 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
TEST(UpconvertReadConcernLevelToSnapshot, AfterOpTime) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
ASSERT_TRUE(readConcern.getArgsOpTime());
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 599244b583f..d614be0d7a9 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -330,12 +330,12 @@ public:
HostAndPort me = someHostAndPortForMe();
auto appendMember =
- [&members, serial = DecimalCounter<uint32_t>() ](const HostAndPort& host) mutable {
- members.append(
- StringData{serial},
- BSON("_id" << static_cast<int>(serial) << "host" << host.toString()));
- ++serial;
- };
+ [&members, serial = DecimalCounter<uint32_t>()](const HostAndPort& host) mutable {
+ members.append(
+ StringData{serial},
+ BSON("_id" << static_cast<int>(serial) << "host" << host.toString()));
+ ++serial;
+ };
appendMember(me);
result.append("me", me.toString());
for (const HostAndPort& seed : seeds) {
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index 5577064f455..0e753e5c654 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -138,17 +138,16 @@ Status ReplSetConfig::_initialize(const BSONObj& cfg, bool forInitiate, OID defa
if (memberElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of " << kMembersFieldName << "."
- << memberElement.fieldName()
- << " to be Object, but found "
+ << memberElement.fieldName() << " to be Object, but found "
<< typeName(memberElement.type()));
}
const auto& memberBSON = memberElement.Obj();
try {
_members.emplace_back(memberBSON, &_tagConfig);
} catch (const DBException& ex) {
- return Status(
- ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << ex.toStatus().toString() << " for member:" << memberBSON);
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << ex.toStatus().toString() << " for member:" << memberBSON);
}
}
@@ -348,43 +347,35 @@ Status ReplSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
if (_customWriteConcernModes.find(modeElement.fieldNameStringData()) !=
_customWriteConcernModes.end()) {
return Status(ErrorCodes::Error(51001),
- str::stream() << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
- << " contains multiple fields named "
- << modeElement.fieldName());
+ str::stream()
+ << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
+ << " contains multiple fields named " << modeElement.fieldName());
}
if (modeElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << " to be an Object, not "
- << typeName(modeElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << " to be an Object, not " << typeName(modeElement.type()));
}
ReplSetTagPattern pattern = _tagConfig.makePattern();
for (auto&& constraintElement : modeElement.Obj()) {
if (!constraintElement.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " to be a number, not "
- << typeName(constraintElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName() << " to be a number, not "
+ << typeName(constraintElement.type()));
}
const int minCount = constraintElement.numberInt();
if (minCount <= 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Value of " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " must be positive, but found "
- << minCount);
+ str::stream()
+ << "Value of " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName()
+ << " must be positive, but found " << minCount);
}
status = _tagConfig.addTagCountConstraintToPattern(
&pattern, constraintElement.fieldNameStringData(), minCount);
@@ -420,8 +411,7 @@ Status ReplSetConfig::validate() const {
if (_replSetName.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Replica set configuration must have non-empty "
- << kIdFieldName
- << " field");
+ << kIdFieldName << " field");
}
if (_heartbeatInterval < Milliseconds(0)) {
return Status(ErrorCodes::BadValue,
@@ -506,41 +496,22 @@ Status ReplSetConfig::validate() const {
const MemberConfig& memberJ = _members[j];
if (memberI.getId() == memberJ.getId()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kIdFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << memberI.getId());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kIdFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kIdFieldName << " == " << memberI.getId());
}
if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kHostFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << memberI.getHostAndPort().toString());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kHostFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kHostFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kHostFieldName
+ << " == " << memberI.getHostAndPort().toString());
}
}
}
@@ -593,9 +564,7 @@ Status ReplSetConfig::validate() const {
str::stream()
<< "Either all host names in a replica set configuration must be localhost "
"references, or none must be; found "
- << localhostCount
- << " out of "
- << _members.size());
+ << localhostCount << " out of " << _members.size());
}
if (voterCount > kMaxVotingMembers || voterCount == 0) {
@@ -636,9 +605,9 @@ Status ReplSetConfig::validate() const {
}
if (_protocolVersion != 1) {
return Status(ErrorCodes::BadValue,
- str::stream() << kProtocolVersionFieldName
- << " of 1 is the only supported value. Found: "
- << _protocolVersion);
+ str::stream()
+ << kProtocolVersionFieldName
+ << " of 1 is the only supported value. Found: " << _protocolVersion);
}
if (_configServer) {
@@ -708,8 +677,7 @@ Status ReplSetConfig::checkIfWriteConcernCanBeSatisfied(
// write concern mode.
return Status(ErrorCodes::UnsatisfiableWriteConcern,
str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode
- << "\"");
+ << writeConcern.wMode << "\"");
} else {
int nodesRemaining = writeConcern.wNumNodes;
for (size_t j = 0; j < _members.size(); ++j) {
diff --git a/src/mongo/db/repl/repl_set_config_checks.cpp b/src/mongo/db/repl/repl_set_config_checks.cpp
index 14cc8e99e61..5c0eeecdb97 100644
--- a/src/mongo/db/repl/repl_set_config_checks.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks.cpp
@@ -63,10 +63,8 @@ StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalSt
if (meConfigs.empty()) {
return StatusWith<int>(ErrorCodes::NodeNotFound,
str::stream() << "No host described in new configuration "
- << newConfig.getConfigVersion()
- << " for replica set "
- << newConfig.getReplSetName()
- << " maps to this node");
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName() << " maps to this node");
}
if (meConfigs.size() > 1) {
str::stream message;
@@ -95,11 +93,9 @@ Status checkElectable(const ReplSetConfig& newConfig, int configIndex) {
if (!myConfig.isElectable()) {
return Status(ErrorCodes::NodeNotElectable,
str::stream() << "This node, " << myConfig.getHostAndPort().toString()
- << ", with _id "
- << myConfig.getId()
+ << ", with _id " << myConfig.getId()
<< " is not electable under the new configuration version "
- << newConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
return Status::OK();
@@ -133,8 +129,7 @@ Status validateArbiterPriorities(const ReplSetConfig& config) {
if (iter->isArbiter() && iter->getPriority() != 0) {
return Status(ErrorCodes::InvalidReplicaSetConfig,
str::stream() << "Member " << iter->getHostAndPort().toString()
- << " is an arbiter but has priority "
- << iter->getPriority()
+ << " is an arbiter but has priority " << iter->getPriority()
<< ". Arbiter priority must be 0.");
}
}
@@ -164,10 +159,8 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream()
<< "New replica set configuration version must be greater than old, but "
- << newConfig.getConfigVersion()
- << " is not greater than "
- << oldConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " is not greater than "
+ << oldConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
@@ -175,8 +168,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set name; "
"old was "
- << oldConfig.getReplSetName()
- << ", and new is "
+ << oldConfig.getReplSetName() << ", and new is "
<< newConfig.getReplSetName());
}
@@ -184,8 +176,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set ID; "
"old was "
- << oldConfig.getReplicaSetId()
- << ", and new is "
+ << oldConfig.getReplicaSetId() << ", and new is "
<< newConfig.getReplicaSetId());
}
@@ -216,18 +207,14 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
}
if (hostsEqual && !idsEqual) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() << "New and old configurations both have members with "
- << MemberConfig::kHostFieldName
- << " of "
- << mOld->getHostAndPort().toString()
- << " but in the new configuration the "
- << MemberConfig::kIdFieldName
- << " field is "
- << mNew->getId()
- << " and in the old configuration it is "
- << mOld->getId()
- << " for replica set "
- << newConfig.getReplSetName());
+ str::stream()
+ << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName << " field is " << mNew->getId()
+ << " and in the old configuration it is " << mOld->getId()
+ << " for replica set " << newConfig.getReplSetName());
}
// At this point, the _id and host fields are equal, so we're looking at the old and
// new configurations for the same member node.
diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp
index b8579f1f6e2..c887e11f69b 100644
--- a/src/mongo/db/repl/repl_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp
@@ -49,34 +49,28 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_VersionMustBe1) {
rses.addSelf(HostAndPort("h1"));
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")))));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
validateConfigForInitiate(&rses, config, getGlobalServiceContext()).getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2")
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock notPresentExternalState;
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -99,21 +93,17 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
TEST_F(ServiceContextTest, ValidateConfigForInitiate_SelfMustBeElectable) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -128,11 +118,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_WriteConcernMustBeSatisfiab
ASSERT_OK(
config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))
<< "settings"
@@ -152,55 +138,37 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOr
ReplSetConfig twoConfig;
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
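Note: the three configs above differ only in the arbiter's priority (0, 1, 2). A sketch of the rule the test name implies, assuming arbiter priority must be 0 or 1 and anything higher fails validation; the `Member` struct is an illustrative stand-in, not MongoDB's `MemberConfig`:

    #include <cassert>

    struct Member {
        bool arbiterOnly = false;
        int priority = 1;
    };

    bool arbiterPriorityOk(const Member& m) {
        // Non-arbiters are unconstrained by this particular rule.
        if (!m.arbiterOnly) {
            return true;
        }
        return m.priority == 0 || m.priority == 1;
    }

    int main() {
        assert(arbiterPriorityOk({true, 0}));   // zeroConfig
        assert(arbiterPriorityOk({true, 1}));   // oneConfig
        assert(!arbiterPriorityOk({true, 2}));  // twoConfig
        return 0;
    }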
@@ -228,11 +196,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
// Two configurations, identical except for version.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -242,11 +206,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -296,11 +256,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
// Two configurations, compatible except for set name.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -310,11 +266,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs1"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -345,35 +297,25 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetId
// Two configurations, compatible except for set ID.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
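Note: both configs above are identical except that each `settings.replicaSetId` comes from a separate `OID::gen()` call, so they necessarily differ. A minimal sketch of the compatibility check this sets up, assuming simplified string IDs in place of `mongo::OID` (the hex values below are made up):

    #include <cassert>
    #include <string>

    struct Config {
        std::string replicaSetId;  // stand-in for settings.replicaSetId (an OID)
    };

    // Assumption: reconfig compatibility requires an unchanged replica set ID.
    bool setIdCompatible(const Config& oldCfg, const Config& newCfg) {
        return oldCfg.replicaSetId == newCfg.replicaSetId;
    }

    int main() {
        const Config oldCfg{"5cde87469b0cdc19b8b59a59"};
        const Config sameId{"5cde87469b0cdc19b8b59a59"};
        const Config newId{"5cde87469b0cdc19b8b59a60"};
        assert(setIdCompatible(oldCfg, sameId));
        assert(!setIdCompatible(oldCfg, newId));
        return 0;
    }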
@@ -402,57 +344,40 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipBuildIn
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << true
- << "priority"
- << 0)
+ << "buildIndexes" << true
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << false
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -484,51 +409,37 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipArbiter
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << false)
+ << "arbiterOnly" << false)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "arbiterOnly"
- << false)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -562,11 +473,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -582,10 +489,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(
legalNewConfigWithNewHostAndId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -607,11 +511,8 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion"
+ << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -638,10 +539,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -662,11 +560,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -677,11 +571,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -738,69 +628,46 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_ArbiterPriorityValueMustBeZ
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
@@ -831,11 +698,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -846,17 +709,12 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -880,10 +738,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
@@ -905,22 +760,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigInvalid) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -948,22 +795,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigWriteConcernNotSat
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -991,11 +830,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1015,15 +850,10 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "h3")))));
@@ -1041,11 +871,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigWriteConcernNotSati
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -1065,11 +891,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigInvalid)
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1089,11 +911,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1112,11 +930,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigWriteConc
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1137,11 +951,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1151,11 +961,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1176,11 +982,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1190,11 +992,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h3")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp
index c795d711aa3..88d36b1b174 100644
--- a/src/mongo/db/repl/repl_set_config_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_test.cpp
@@ -63,11 +63,7 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -92,32 +88,24 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1234
- << "members"
- << BSON_ARRAY(BSON("_id" << 234 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("NYC"
- << "NY")))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("eastCoast" << BSON("NYC" << 1))
- << "chainingAllowed"
- << false
- << "heartbeatIntervalMillis"
- << 5000
- << "heartbeatTimeoutSecs"
- << 120
- << "electionTimeoutMillis"
- << 10))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1234 << "members"
+ << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "protocolVersion" << 1 << "settings"
+ << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed"
+ << false << "heartbeatIntervalMillis" << 5000
+ << "heartbeatTimeoutSecs" << 120 << "electionTimeoutMillis"
+ << 10))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("rs0", config.getReplSetName());
ASSERT_EQUALS(1234, config.getConfigVersion());
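Note: the settings subdocument above mixes units across fields. A small sanity sketch of those units, assuming the suffixes mean what they say (`*Millis` fields are milliseconds, `*Secs` fields are seconds), with values copied from the test document:

    #include <cassert>
    #include <chrono>

    int main() {
        using namespace std::chrono;
        const milliseconds heartbeatInterval{5000};  // heartbeatIntervalMillis
        const seconds heartbeatTimeout{120};         // heartbeatTimeoutSecs
        const milliseconds electionTimeout{10};      // electionTimeoutMillis
        assert(duration_cast<seconds>(heartbeatInterval) == seconds{5});
        assert(heartbeatTimeout > duration_cast<seconds>(heartbeatInterval));
        assert(electionTimeout < heartbeatInterval);
        return 0;
    }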
@@ -139,27 +127,20 @@ TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:11111")
- << BSON("_id" << 1 << "host"
- << "localhost:22222"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 2 << "host"
- << "localhost:33333"
- << "hidden"
- << true
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "localhost:44444")))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:11111")
+ << BSON("_id" << 1 << "host"
+ << "localhost:22222"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "localhost:33333"
+ << "hidden" << true << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "localhost:44444")))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(ConnectionString::forReplicaSet(
"rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}})
@@ -169,31 +150,22 @@ TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 5 << "host"
- << "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0 << "priority" << 0)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
@@ -201,37 +173,25 @@ TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
TEST(ReplSetConfig, MajorityCalculationNearlyHalfArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3
- << "arbiterOnly"
- << true)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "arbiterOnly" << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
@@ -240,68 +200,45 @@ TEST(ReplSetConfig, MajorityCalculationEvenNumberOfMembers) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)))));
+ << "_id" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
TEST(ReplSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(
+ BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
}
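Note: the majority tests above all fit one rule: a write majority is a simple majority of voting members, so non-voting members and the raw member count are irrelevant. A sketch, assuming the rule is floor(voters / 2) + 1:

    #include <cassert>

    // Assumption: write majority = floor(voters / 2) + 1, counting only
    // members with votes > 0.
    int writeMajority(int voters) {
        return voters / 2 + 1;
    }

    int main() {
        assert(writeMajority(3) == 2);  // five members, two with votes: 0
        assert(writeMajority(5) == 3);  // five voters, two of them arbiters
        assert(writeMajority(4) == 3);  // even number of voting members
        return 0;
    }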
@@ -317,18 +254,14 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingIdField) {
// Replica set name must be present.
ASSERT_EQUALS(
ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ config.initialize(BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
// Empty repl set name parses, but does not validate.
ASSERT_OK(config.initialize(BSON("_id"
<< ""
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
@@ -341,9 +274,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey,
config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
@@ -351,29 +282,19 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< "1"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 0.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 0.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
@@ -381,9 +302,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< static_cast<long long>(std::numeric_limits<int>::max()) + 1
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
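Note: the hunk above pins down the version field's numeric range: 1.0 validates, while 0.0 and INT_MAX + 1 parse but fail validation, and a string is rejected earlier as a type mismatch. A sketch of just the range check, assuming the parser's type handling is out of scope:

    #include <cassert>
    #include <limits>

    // Assumption: the validation bound is [1, INT_MAX], inferred from the
    // assertions above; type checking happens during parsing, not here.
    bool versionInRange(long long version) {
        return version >= 1 && version <= std::numeric_limits<int>::max();
    }

    int main() {
        assert(versionInRange(1));
        assert(!versionInRange(0));
        assert(!versionInRange(
            static_cast<long long>(std::numeric_limits<int>::max()) + 1));
        return 0;
    }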
@@ -394,21 +313,13 @@ TEST(ReplSetConfig, ParseFailsWithBadMembers) {
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< "localhost:23456"))));
ASSERT_NOT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "localhost:12345")))));
}
@@ -417,11 +328,7 @@ TEST(ReplSetConfig, ParseFailsWithLocalNonLocalHostMix) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost")
<< BSON("_id" << 1 << "host"
@@ -433,15 +340,11 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ReplSetConfig config;
const BSONObj configBsonNoElectableNodes = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -450,57 +353,41 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ASSERT_OK(config.initialize(configBsonNoElectableNodes));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesOneArbiter = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "priority"
- << 0)));
+ const BSONObj configBsonNoElectableNodesOneArbiter =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesTwoArbiters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "arbiterOnly"
- << 1)));
+ const BSONObj configBsonNoElectableNodesTwoArbiters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly" << 1)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneElectableNode = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -511,46 +398,30 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
TEST(ReplSetConfig, ParseFailsWithTooFewVoters) {
ReplSetConfig config;
- const BSONObj configBsonNoVoters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "votes"
- << 0
- << "priority"
- << 0)));
+ const BSONObj configBsonNoVoters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes" << 0 << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoVoters));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneVoter = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "votes"
- << 0
- << "priority"
+ << "votes" << 0 << "priority"
<< 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
- << "votes"
- << 1)));
+ << "votes" << 1)));
ASSERT_OK(config.initialize(configBsonOneVoter));
ASSERT_OK(config.validate());
}
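Note: `configBsonNoVoters` fails validation while `configBsonOneVoter` passes, so one voting member is the floor. A sketch of that lower bound, assuming the real validator also enforces an upper cap on voters that is not modeled here; `Member` is again an illustrative stand-in:

    #include <cassert>
    #include <vector>

    struct Member {
        int votes = 1;
    };

    bool hasEnoughVoters(const std::vector<Member>& members) {
        int voters = 0;
        for (const Member& m : members) {
            if (m.votes > 0) {
                ++voters;
            }
        }
        return voters >= 1;
    }

    int main() {
        assert(!hasEnoughVoters({{0}, {0}}));  // configBsonNoVoters
        assert(hasEnoughVoters({{0}, {1}}));   // configBsonOneVoter
        return 0;
    }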
@@ -567,11 +438,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateHost) {
ReplSetConfig config;
const BSONObj configBson = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1")
<< BSON("_id" << 1 << "host"
@@ -621,14 +488,11 @@ TEST(ReplSetConfig, ParseFailsWithTooManyNodes) {
TEST(ReplSetConfig, ParseFailsWithUnexpectedField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "unexpectedfield"
- << "value"));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "unexpectedfield"
+ << "value"));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -636,11 +500,7 @@ TEST(ReplSetConfig, ParseFailsWithNonArrayMembersField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< "value"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -649,11 +509,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatIntervalMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -671,11 +527,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericElectionTimeoutMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -688,11 +540,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -705,11 +553,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -722,11 +566,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolConfigServerField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "configsvr"
@@ -738,11 +578,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectSettingsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -754,11 +590,7 @@ TEST(ReplSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -771,11 +603,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -788,11 +616,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -805,11 +629,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -824,20 +644,16 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("tag"
- << "yes")))
- << "settings"
- << BSON("getLastErrorModes" << BSON("one" << 1))));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings" << BSON("getLastErrorModes" << BSON("one" << 1))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -846,11 +662,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -867,11 +679,7 @@ TEST(ReplSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -887,11 +695,7 @@ TEST(ReplSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -906,13 +710,8 @@ TEST(ReplSetConfig, ParseFailsWithRepairField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "repaired"
- << true
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "repaired" << true << "version" << 1
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))));
ASSERT_EQUALS(ErrorCodes::RepairedReplicaSetNode, status);
@@ -922,11 +721,7 @@ TEST(ReplSetConfig, ValidateFailsWithBadProtocolVersion) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 3
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 3 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -941,11 +736,7 @@ TEST(ReplSetConfig, ValidateFailsWithProtocolVersion0) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 0
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 0 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -960,11 +751,7 @@ TEST(ReplSetConfig, ValidateFailsWithDuplicateMemberId) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 0 << "host"
@@ -979,15 +766,10 @@ TEST(ReplSetConfig, ValidateFailsWithInvalidMember) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "hidden"
- << true))));
+ << "hidden" << true))));
ASSERT_OK(status);
status = config.validate();
@@ -998,29 +780,19 @@ TEST(ReplSetConfig, ChainingAllowedField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << true))));
+ << "settings" << BSON("chainingAllowed" << true))));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << false))));
+ << "settings" << BSON("chainingAllowed" << false))));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.isChainingAllowed());
}
@@ -1029,13 +801,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_TRUE(config.isConfigServer());
@@ -1043,13 +810,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config2;
ASSERT_OK(config2.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "configsvr"
- << false
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "configsvr"
+ << false << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
@@ -1072,25 +834,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config.isConfigServer());
ReplSetConfig config2;
- ASSERT_OK(config2.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config2.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
@@ -1099,25 +854,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config3;
ASSERT_OK(config3.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config3.isConfigServer());
ReplSetConfig config4;
- ASSERT_OK(config4.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config4.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config4.isConfigServer());
}
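Note: `ConfigServerFieldDefaults` above shows the default depends on both the server's cluster role and the entry point: with `clusterRole = ConfigServer`, only `initializeForInitiate` flips the default to true. A sketch of that truth table, with a simplified local `ClusterRole` enum standing in for the real one:

    #include <cassert>

    enum class ClusterRole { None, ConfigServer };

    // Assumption: configsvr defaults to true only when initiating on a
    // config server; plain initialize leaves it false unless the document
    // sets it explicitly.
    bool defaultConfigSvr(ClusterRole role, bool forInitiate) {
        return forInitiate && role == ClusterRole::ConfigServer;
    }

    int main() {
        assert(!defaultConfigSvr(ClusterRole::None, false));          // config
        assert(!defaultConfigSvr(ClusterRole::None, true));           // config2
        assert(!defaultConfigSvr(ClusterRole::ConfigServer, false));  // config3
        assert(defaultConfigSvr(ClusterRole::ConfigServer, true));    // config4
        return 0;
    }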
@@ -1125,29 +873,19 @@ TEST(ReplSetConfig, HeartbeatIntervalField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(5), config.getHeartbeatInterval());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << -5000))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -1155,29 +893,19 @@ TEST(ReplSetConfig, ElectionTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 20))));
+ << "settings" << BSON("electionTimeoutMillis" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(20), config.getElectionTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << -20)));
+ << "settings" << BSON("electionTimeoutMillis" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "election timeout must be greater than 0");
}
@@ -1186,29 +914,19 @@ TEST(ReplSetConfig, HeartbeatTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 20))));
+ << "settings" << BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << -20)));
+ << "settings" << BSON("heartbeatTimeoutSecs" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "heartbeat timeout must be greater than 0");
}
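Note: both negative-value cases above fail with "must be greater than 0". The check itself is one comparison; a sketch, assuming the quoted reason strings are attached elsewhere in the real validator:

    #include <cassert>

    bool timeoutIsValid(long long millis) {
        return millis > 0;  // "... must be greater than 0"
    }

    int main() {
        assert(timeoutIsValid(20));
        assert(!timeoutIsValid(0));
        assert(!timeoutIsValid(-20));
        return 0;
    }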
@@ -1217,11 +935,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1232,11 +946,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1244,27 +954,19 @@ TEST(ReplSetConfig, GleDefaultField) {
<< "frim")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
ASSERT_OK(
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -1352,19 +1054,15 @@ bool operator==(const ReplSetConfig& a, const ReplSetConfig& b) {
TEST(ReplSetConfig, toBSONRoundTripAbility) {
ReplSetConfig configA;
ReplSetConfig configB;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_TRUE(configA == configB);
}
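Note: `toBSONRoundTripAbility` and the variants that follow all assert one property: re-initializing from `toBSON()` reproduces an equal config, even for configs that fail validation. The same property in miniature, with hypothetical stand-in types and a trivial text format in place of BSON:

    #include <cassert>
    #include <string>

    struct Cfg {
        std::string id;
        int version;
        bool operator==(const Cfg& o) const {
            return id == o.id && version == o.version;
        }
    };

    std::string toText(const Cfg& c) {
        return c.id + "|" + std::to_string(c.version);
    }

    Cfg fromText(const std::string& s) {
        const auto bar = s.find('|');
        return {s.substr(0, bar), std::stoi(s.substr(bar + 1))};
    }

    int main() {
        const Cfg a{"rs0", 1};
        assert(fromText(toText(a)) == a);  // parse(serialize(x)) == x
        return 0;
    }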
@@ -1372,132 +1070,83 @@ TEST(ReplSetConfig, toBSONRoundTripAbility) {
TEST(ReplSetConfig, toBSONRoundTripAbilityWithHorizon) {
ReplSetConfig configA;
ReplSetConfig configB;
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "horizons"
+ << BSON("horizon"
+ << "example.com:42")))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+ ReplSetConfig configA;
+ ReplSetConfig configB;
ASSERT_OK(configA.initialize(BSON(
"_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "asdf"
+ << "version" << 9 << "writeConcernMajorityJournalDefault" << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "horizons"
- << BSON("horizon"
- << "example.com:42")))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
+ << "arbiterOnly" << true << "votes" << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
+ << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
+ << 0 << "tags"
+ << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "votes" << 0 << "priority" << 0 << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true")))
+ << "protocolVersion" << 1 << "settings"
+
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 4 << "chainingAllowd"
+ << true << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
+ << "coasts" << BSON("coast" << 2))))));
+ BSONObj configObjA = configA.toBSON();
+ ASSERT_OK(configB.initialize(configObjA));
ASSERT_TRUE(configA == configB);
}
-TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
ReplSetConfig configA;
ReplSetConfig configB;
ASSERT_OK(configA.initialize(
BSON("_id"
- << "asdf"
- << "version"
- << 9
- << "writeConcernMajorityJournalDefault"
- << true
- << "members"
+ << ""
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 1)
- << BSON("_id" << 3 << "host"
+ << "arbiterOnly" << true << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 0 << "host"
<< "localhost:3828"
- << "arbiterOnly"
- << false
- << "hidden"
- << true
- << "buildIndexes"
- << false
- << "priority"
- << 0
- << "slaveDelay"
- << 17
- << "votes"
- << 0
- << "tags"
- << BSON("coast"
- << "east"
- << "ssd"
- << "true"))
+ << "arbiterOnly" << false << "buildIndexes" << false
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
- << "foo.com:3828"
- << "votes"
- << 0
- << "priority"
- << 0
- << "tags"
- << BSON("coast"
- << "west"
- << "hdd"
- << "true")))
- << "protocolVersion"
- << 1
+ << "localhost:3828"
+ << "votes" << 0 << "priority" << 0))
<< "settings"
-
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
- << "electionTimeoutMillis"
- << 4
- << "chainingAllowd"
- << true
- << "getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
- << "coasts"
- << BSON("coast" << 2))))));
- BSONObj configObjA = configA.toBSON();
- ASSERT_OK(configB.initialize(configObjA));
- ASSERT_TRUE(configA == configB);
-}
-
-TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
- ReplSetConfig configA;
- ReplSetConfig configB;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << ""
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 0 << "host"
- << "localhost:3828"
- << "arbiterOnly"
- << false
- << "buildIndexes"
- << false
- << "priority"
- << 2)
- << BSON("_id" << 2 << "host"
- << "localhost:3828"
- << "votes"
- << 0
- << "priority"
- << 0))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs"
- << 20
- << "electionTimeoutMillis"
- << 2))));
+ << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 2))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_NOT_OK(configA.validate());
ASSERT_NOT_OK(configB.validate());
@@ -1506,59 +1155,52 @@ TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
TEST(ReplSetConfig, CheckIfWriteConcernCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
WriteConcernOptions validNumberWC;
validNumberWC.wNumNodes = 5;
@@ -1619,19 +1261,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveArbiters) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly"
- << true)))));
+ << "arbiterOnly" << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "Arbiters are not allowed");
@@ -1641,21 +1277,14 @@ TEST(ReplSetConfig, CheckConfigServerMustBuildIndexes) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "buildIndexes"
- << false)))));
+ << "priority" << 0
+ << "buildIndexes" << false)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "must build indexes");
@@ -1665,20 +1294,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveSlaveDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "slaveDelay"
+ << "priority" << 0 << "slaveDelay"
<< 3)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
@@ -1691,19 +1313,13 @@ TEST(ReplSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJournalD
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), " must be true in replica set configurations being ");
@@ -1713,33 +1329,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 4)
+ << "priority" << 4)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 5))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 5))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configA.validate());
ASSERT_EQUALS(Milliseconds(5000), configA.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(4000), configA.getPriorityTakeoverDelay(1));
@@ -1750,33 +1356,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configB;
ASSERT_OK(configB.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 3))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 3))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configB.validate());
ASSERT_EQUALS(Milliseconds(5000), configB.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(3000), configB.getPriorityTakeoverDelay(1));
@@ -1789,29 +1385,20 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelay) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << 5000))));
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(5000), config.getCatchUpTakeoverDelay());
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << -5000)));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << -5000)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(
status.reason(),
@@ -1822,23 +1409,16 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelayDefault) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)))));
+ << "priority" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(30000), config.getCatchUpTakeoverDelay());
}
@@ -1849,11 +1429,7 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// PV1, should default to true.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1863,15 +1439,10 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// Should be able to set it false in PV1.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1881,11 +1452,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "horizons"
@@ -1914,8 +1481,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "delta"
<< "c.host3:44")))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = config.validate();
ASSERT_NOT_OK(status);
@@ -1929,11 +1495,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
// in the member-config code path.
status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "same1"
<< "horizons"
@@ -1978,8 +1540,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "d.host3:44"
<< "delta"
<< "d.host4:44")))
- << "writeConcernMajorityJournalDefault"
- << false));
+ << "writeConcernMajorityJournalDefault" << false));
ASSERT_OK(status) << " failing status was: " << status.reason();
status = config.validate();
@@ -2003,15 +1564,11 @@ TEST(ReplSetConfig, ReplSetId) {
auto status =
ReplSetConfig().initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
+ << "priority" << 1))
<< "settings"
<< BSON("replicaSetId" << OID::gen())));
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
@@ -2025,15 +1582,11 @@ TEST(ReplSetConfig, ReplSetId) {
ASSERT_OK(
configInitiate.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)))));
+ << "priority" << 1)))));
ASSERT_OK(configInitiate.validate());
ASSERT_TRUE(configInitiate.hasReplicaSetId());
OID replicaSetId = configInitiate.getReplicaSetId();
@@ -2042,17 +1595,11 @@ TEST(ReplSetConfig, ReplSetId) {
ReplSetConfig configLocal;
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << replicaSetId))));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << replicaSetId))));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
ASSERT_EQUALS(replicaSetId, configLocal.getReplicaSetId());
@@ -2061,15 +1608,10 @@ TEST(ReplSetConfig, ReplSetId) {
OID defaultReplicaSetId = OID::gen();
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))),
+ << "priority" << 1))),
defaultReplicaSetId));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
@@ -2078,34 +1620,22 @@ TEST(ReplSetConfig, ReplSetId) {
// 'replicaSetId' field cannot be null.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << OID())));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << OID())));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "replicaSetId field value cannot be null");
// 'replicaSetId' field must be an OID.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << 12345)));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << 12345)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_STRING_CONTAINS(status.reason(),
"\"replicaSetId\" had the wrong type. Expected objectId, found int");
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
index 3b79768db8d..a7cc785995e 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
@@ -78,10 +78,9 @@ Status ReplSetHeartbeatArgsV1::initialize(const BSONObj& argsObj) {
if (status.isOK()) {
if (tempHeartbeatVersion != 1) {
return Status(ErrorCodes::Error(40666),
- str::stream() << "Found invalid value for field "
- << kHeartbeatVersionFieldName
- << ": "
- << tempHeartbeatVersion);
+ str::stream()
+ << "Found invalid value for field " << kHeartbeatVersionFieldName
+ << ": " << tempHeartbeatVersion);
}
_heartbeatVersion = tempHeartbeatVersion;
_hasHeartbeatVersion = true;
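
This hunk reflows an error-construction idiom that recurs through the rest of the diff: str::stream() moves onto its own line so the chained message fragments pack tightly after it. A sketch of the pattern in isolation, assuming mongo::str::stream from util/str.h; the field name is inlined here for brevity:

    #include "mongo/base/status.h"
    #include "mongo/util/str.h"

    // Build a Status whose reason string is assembled from a stream; the
    // stream converts implicitly to std::string when the Status is built.
    mongo::Status makeBadFieldStatus(long long heartbeatVersion) {
        return mongo::Status(mongo::ErrorCodes::Error(40666),
                             mongo::str::stream()
                                 << "Found invalid value for field heartbeatVersion: "
                                 << heartbeatVersion);
    }
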
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index 4b16c88e389..5c43a35c71b 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -193,18 +193,18 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (memberStateElement.eoo()) {
_stateSet = false;
} else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"" << kMemberStateFieldName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Expected \"" << kMemberStateFieldName
<< "\" field in response to replSetHeartbeat "
"command to have type NumberInt or NumberLong, but found type "
<< typeName(memberStateElement.type()));
} else {
long long stateInt = memberStateElement.numberLong();
if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Value for \"" << kMemberStateFieldName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value for \"" << kMemberStateFieldName
<< "\" in response to replSetHeartbeat is "
"out of range; legal values are non-negative and no more than "
<< MemberState::RS_MAX);
@@ -217,8 +217,7 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (configVersionElement.eoo()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "Response to replSetHeartbeat missing required \""
- << kConfigVersionFieldName
- << "\" field");
+ << kConfigVersionFieldName << "\" field");
}
if (configVersionElement.type() != NumberInt) {
return Status(ErrorCodes::TypeMismatch,
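
The validation logic being reflowed here follows a common shape: tolerate a missing element, reject a wrong type, then range-check the value. A simplified sketch of that shape; kMaxState stands in for MemberState::RS_MAX, assumed to be 10 here:

    #include "mongo/base/status.h"
    #include "mongo/bson/bsonobj.h"

    // Stand-in for MemberState::RS_MAX (an assumption for this sketch).
    constexpr long long kMaxState = 10;

    mongo::Status checkStateField(const mongo::BSONObj& doc) {
        auto el = doc["state"];
        if (el.eoo()) {
            return mongo::Status::OK();  // the field is optional
        }
        if (el.type() != mongo::NumberInt && el.type() != mongo::NumberLong) {
            return {mongo::ErrorCodes::TypeMismatch,
                    "\"state\" must be NumberInt or NumberLong"};
        }
        if (el.numberLong() < 0 || el.numberLong() > kMaxState) {
            return {mongo::ErrorCodes::BadValue, "\"state\" is out of range"};
        }
        return mongo::Status::OK();
    }
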
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index f3f0f1ce8bb..352456c929d 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -166,19 +166,16 @@ TEST(ReplSetHeartbeatResponse, InitializeNoDurableWallTime) {
TEST(ReplSetHeartbeatResponse, InitializeWrongAppliedOpTimeType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << "hello");
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS("\"opTime\" had the wrong type. Expected object, found string", result.reason());
initializerObj = BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
<< OpTime().getTimestamp());
result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -190,9 +187,7 @@ TEST(ReplSetHeartbeatResponse, InitializeNoAppliedWallTime) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON());
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON());
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result);
ASSERT_EQUALS("Missing expected field \"wallTime\"", result.reason());
@@ -202,12 +197,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -221,13 +212,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooLow) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << -1);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << -1);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -240,13 +226,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooHigh) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << 11);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << 11);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -259,12 +240,8 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "v"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -276,17 +253,12 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "set"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "set" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -297,17 +269,12 @@ TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "syncingTo"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "syncingTo" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -318,17 +285,12 @@ TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -339,17 +301,12 @@ TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << BSON("illegalFieldName" << 2));
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << BSON("illegalFieldName" << 2));
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
@@ -371,12 +328,9 @@ TEST(ReplSetHeartbeatResponse, InvalidResponseOpTimeMissesConfigVersion) {
ReplSetHeartbeatResponse hbResp;
Status result = hbResp.initialize(BSON("ok" << 1.0 << "durableOpTime"
<< OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)),
+ << "durableWallTime" << Date_t() + Seconds(100)
+ << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100)),
0,
/*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.code());
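
Each test above rebuilds the same initializer document, with a wall-time field trailing each optime. A hypothetical helper showing that shape once (the name and signature are illustrative, not part of the test file):

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/util/time_support.h"

    // Heartbeat-response initializer shape used by the tests: each optime
    // (already serialized to BSON) is paired with a Date_t wall time.
    mongo::BSONObj makeHbInitializer(mongo::BSONObj opTime, mongo::Date_t wall) {
        return BSON("ok" << 1.0 << "durableOpTime" << opTime << "durableWallTime" << wall
                         << "opTime" << opTime << "wallTime" << wall);
    }
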
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index f2127b70518..a57b7e35ceb 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -100,8 +100,7 @@ void ReplicationConsistencyMarkersImpl::initializeMinValidDocument(OperationCont
// will always be greater than the provided ones.
TimestampedBSONObj upsert;
upsert.obj = BSON("$max" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << Timestamp()
- << MinValidDocument::kMinValidTermFieldName
+ << Timestamp() << MinValidDocument::kMinValidTermFieldName
<< OpTime::kUninitializedTerm));
// The initialization write should go into the first checkpoint taken, so we provide no
@@ -153,10 +152,8 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
update.obj = BSON("$unset" << kInitialSyncFlag << "$set"
<< BSON(MinValidDocument::kMinValidTimestampFieldName
<< time.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << time.getTerm()
- << MinValidDocument::kAppliedThroughFieldName
- << time));
+ << MinValidDocument::kMinValidTermFieldName << time.getTerm()
+ << MinValidDocument::kAppliedThroughFieldName << time));
// We clear the initial sync flag at the 'lastAppliedOpTime'. This is unnecessary, since there
// should not be any stable checkpoints being taken that this write could inadvertently enter.
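PLACEHOLDER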
@@ -194,10 +191,10 @@ void ReplicationConsistencyMarkersImpl::setMinValid(OperationContext* opCtx,
LOG(3) << "setting minvalid to exactly: " << minValid.toString() << "(" << minValid.toBSON()
<< ")";
TimestampedBSONObj update;
- update.obj = BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << minValid.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << minValid.getTerm()));
+ update.obj =
+ BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
+ << minValid.getTimestamp() << MinValidDocument::kMinValidTermFieldName
+ << minValid.getTerm()));
// This method is only used with storage engines that do not support recover to stable
// timestamp. As a result, their timestamps do not matter.
@@ -346,8 +343,8 @@ Status ReplicationConsistencyMarkersImpl::createInternalCollections(OperationCon
auto status = _storageInterface->createCollection(opCtx, nss, CollectionOptions());
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
- str::stream() << "Failed to create collection. Ns: " << nss.ns() << " Error: "
- << status.toString()};
+ str::stream() << "Failed to create collection. Ns: " << nss.ns()
+ << " Error: " << status.toString()};
}
}
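
The reflowed update documents in this file all wrap a single $-operator around a nested object. A minimal sketch of the $max upsert shape; field names are abbreviated, and -1 stands in for OpTime::kUninitializedTerm:

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/bson/timestamp.h"

    // $max seeds the minValid timestamp/term pair: the server keeps whichever
    // value is greater, so a null Timestamp never overwrites real progress.
    mongo::BSONObj makeInitialMinValidUpsert() {
        return BSON("$max" << BSON("ts" << mongo::Timestamp() << "t" << -1LL));
    }
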
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index b9eee2a78a9..8b767924eac 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -774,12 +774,12 @@ public:
virtual std::vector<MemberData> getMemberData() const = 0;
/*
- * Handles an incoming replSetRequestVotes command.
- *
- * Populates the given 'response' object with the result of the request. If there is a failure
- * processing the vote request, returns an error status. If an error is returned, the value of
- * the populated 'response' object is invalid.
- */
+ * Handles an incoming replSetRequestVotes command.
+ *
+ * Populates the given 'response' object with the result of the request. If there is a failure
+ * processing the vote request, returns an error status. If an error is returned, the value of
+ * the populated 'response' object is invalid.
+ */
virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index c2744e2bb5a..65cb2744ec4 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -171,13 +171,13 @@ auto makeTaskExecutor(ServiceContext* service, const std::string& poolName) {
* down.
*/
void scheduleWork(executor::TaskExecutor* executor, executor::TaskExecutor::CallbackFn work) {
- auto cbh = executor->scheduleWork([work = std::move(work)](
- const executor::TaskExecutor::CallbackArgs& args) {
- if (args.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- work(args);
- });
+ auto cbh = executor->scheduleWork(
+ [work = std::move(work)](const executor::TaskExecutor::CallbackArgs& args) {
+ if (args.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ work(args);
+ });
if (cbh == ErrorCodes::ShutdownInProgress) {
return;
}
@@ -529,9 +529,7 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection(
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
str::stream() << "Failed to create local last vote collection. Ns: "
- << lastVoteCollectionName
- << " Error: "
- << status.toString()};
+ << lastVoteCollectionName << " Error: " << status.toString()};
}
// Make sure there's always a last vote document.
@@ -659,9 +657,7 @@ StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastO
return StatusWith<OpTimeAndWallTime>(
ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << NamespaceString::kRsOplogNamespace.ns()
- << " missing \""
- << tsFieldName
- << "\" field");
+ << " missing \"" << tsFieldName << "\" field");
}
if (tsElement.type() != bsonTimestamp) {
return StatusWith<OpTimeAndWallTime>(
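
The scheduleWork reflow above is worth reading past the formatting: the callable is moved into the lambda's init-capture, and a canceled callback returns before running the work. A self-contained sketch of that guard pattern with simplified stand-in types, not the executor's real API:

    #include <functional>
    #include <utility>

    // Simplified stand-in for the executor's CallbackArgs.
    struct CallbackArgs {
        bool canceled = false;
    };

    void runGuarded(std::function<void(const CallbackArgs&)> work,
                    const CallbackArgs& args) {
        // Move the (possibly move-only) work into the lambda's init-capture.
        auto guarded = [work = std::move(work)](const CallbackArgs& a) {
            if (a.canceled) {
                return;  // mirrors the ErrorCodes::CallbackCanceled early-out
            }
            work(a);
        };
        guarded(args);
    }
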
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index c7ea74e610a..645ac39e28b 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -132,11 +132,11 @@ private:
void _shardingOnTransitionToPrimaryHook(OperationContext* opCtx);
/**
- * Drops all temporary collections on all databases except "local".
- *
- * The implementation may assume that the caller has acquired the global exclusive lock
- * for "opCtx".
- */
+ * Drops all temporary collections on all databases except "local".
+ *
+ * The implementation may assume that the caller has acquired the global exclusive lock
+ * for "opCtx".
+ */
void _dropAllTempCollections(OperationContext* opCtx);
ServiceContext* _service;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 3f56ada6698..b23082ecb55 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -930,10 +930,9 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
- str::stream() << "Timed out waiting for state to become "
- << expectedState.toString()
- << ". Current state is "
- << _memberState.toString());
+ str::stream()
+ << "Timed out waiting for state to become " << expectedState.toString()
+ << ". Current state is " << _memberState.toString());
}
return Status::OK();
}
@@ -1645,8 +1644,9 @@ bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
"'committed' optime "
<< opTime
<< ". There are still drop pending collections (earliest drop optime: "
- << *dropOpTime << ") that have to be removed from storage before we can "
- "satisfy the write concern "
+ << *dropOpTime
+ << ") that have to be removed from storage before we can "
+ "satisfy the write concern "
<< writeConcern.toBSON();
return false;
}
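
The waitForMemberState hunk wraps a standard timed condition-variable wait: wait_for with a predicate returns false only when the timeout expires with the predicate still unsatisfied. A plain standard-library sketch of the same shape:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if 'state' reached 'expected' before the timeout; the
    // predicate is re-evaluated on every wakeup, so spurious wakeups are safe.
    bool waitForState(std::condition_variable& cv,
                      std::unique_lock<std::mutex>& lk,
                      std::chrono::milliseconds timeout,
                      const int& state,
                      int expected) {
        return cv.wait_for(lk, timeout, [&] { return state == expected; });
    }
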
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index d7000f33f35..57daa9385a7 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -546,7 +546,7 @@ private:
* Loops continuously to kill all conflicting operations. And, aborts all stashed (inactive)
* transactions.
* Terminates once killSignaled is set true.
- */
+ */
void _killOpThreadFn();
/*
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index d3ed5c96e6d..baf79108edf 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -60,11 +60,7 @@ using ApplierState = ReplicationCoordinator::ApplierState;
TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -91,18 +87,14 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -113,24 +105,17 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
}
TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(BSON("_id"
- << "mySet"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes"
- << 0
- << "hidden"
- << true
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes" << 0 << "hidden" << true << "priority" << 0))
+ << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -185,15 +170,12 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
TEST_F(ReplCoordTest, StartElectionDoesNotStartAnElectionWhenNodeIsRecovering) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_RECOVERING));
@@ -213,13 +195,10 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(10, 1), 0), Date_t() + Seconds(10));
@@ -247,17 +226,14 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -296,9 +272,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -313,8 +287,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
<< "node6:12345")
<< BSON("_id" << 7 << "host"
<< "node7:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -338,17 +311,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -380,9 +350,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 0 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
voteRequests++;
} else {
net->blackHole(noi);
@@ -399,17 +369,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -443,9 +410,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
voteRequests++;
} else {
@@ -466,9 +431,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
OperationContextNoop opCtx;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -479,8 +442,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -498,15 +460,12 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1))
+ << "protocolVersion" << 1))
.transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
@@ -593,17 +552,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -627,9 +583,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 1 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
}
net->runReadyNetworkOperations();
}
@@ -644,17 +600,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
TEST_F(ReplCoordTest, TransitionToRollbackFailsWhenElectionInProgress) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -683,17 +636,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -719,9 +669,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
}
net->runReadyNetworkOperations();
@@ -738,17 +686,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -779,17 +724,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -816,10 +758,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << request.cmdObj["term"].Long() << "voteGranted" << true
- << "reason"
- << "")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true << "reason"
+ << "")));
}
net->runReadyNetworkOperations();
}
@@ -982,18 +923,14 @@ private:
TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDelaySet) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("catchUpTimeoutMillis" << 0 << "catchUpTakeoverDelay"
<< 10000));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -1024,17 +961,14 @@ TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDe
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1071,21 +1005,16 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary)
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1123,19 +1052,15 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
startCapturingLogMessages();
@@ -1179,17 +1104,14 @@ TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1236,21 +1158,16 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
// In order for node 1 to first schedule a catchup takeover, then a priority takeover
// once the first gets canceled, it must have a higher priority than the current primary
// (node 2). But, it must not have the highest priority in the replica set. Otherwise,
@@ -1302,17 +1219,14 @@ TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1376,17 +1290,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1439,17 +1350,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
TEST_F(TakeoverTest, SuccessfulCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1522,9 +1430,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -1535,8 +1441,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1598,12 +1503,11 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
net->blackHole(noi);
} else {
bool voteGranted = request.target != primaryHostAndPort;
- net->scheduleResponse(
- noi,
- until,
- makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted" << voteGranted
- << "reason"
- << "")));
+ net->scheduleResponse(noi,
+ until,
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << voteGranted << "reason"
+ << "")));
voteRequests++;
}
net->runReadyNetworkOperations();
@@ -1631,17 +1535,14 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1692,21 +1593,16 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1776,19 +1672,15 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1823,19 +1715,15 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent
TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1894,19 +1782,15 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1973,19 +1857,15 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -2053,19 +1933,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2093,11 +1968,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2118,19 +1989,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2143,11 +2009,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2209,14 +2071,13 @@ protected:
net->getNextReadyRequest(), net->now(), makeHeartbeatResponse(opTime));
} else if (request.cmdObj.firstElement().fieldNameStringData() ==
"replSetRequestVotes") {
- net->scheduleResponse(net->getNextReadyRequest(),
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ net->getNextReadyRequest(),
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
// Stop the loop and let the caller handle unexpected requests.
net->exitNetwork();
@@ -2230,18 +2091,14 @@ protected:
ReplSetConfig setUp3NodeReplSetAndRunForElection(OpTime opTime, long long timeout = 5000) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 1 << "catchUpTimeoutMillis"
<< timeout));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
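Note on the hunks above: they all apply the same clang-format rule to the BSON builder DSL, packing short field/value pairs such as "version" << 1 onto a single line instead of one token per line. A minimal sketch of the pattern in its post-change shape, assuming the mongo BSON headers these tests already include; the helper name is illustrative, not from the patch:

    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobjbuilder.h"

    namespace {
    // Illustrative helper: the three-node replica set config these tests feed
    // to assertStartSuccess(), written the way the reformatted hunks read.
    mongo::BSONObj makeThreeNodeConfig() {
        return BSON("_id"
                    << "mySet"
                    << "version" << 1 << "members"
                    << BSON_ARRAY(BSON("_id" << 1 << "host"
                                             << "node1:12345")
                                  << BSON("_id" << 2 << "host"
                                                << "node2:12345")
                                  << BSON("_id" << 3 << "host"
                                                << "node3:12345"))
                    << "protocolVersion" << 1);
    }
    }  // namespace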
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 1a6804a68c1..eff5d557e1f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -174,11 +174,11 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (replMetadata.isOK() && _rsConfig.isInitialized() && _rsConfig.hasReplicaSetId() &&
replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig.getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
- responseStatus = Status(ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << "replica set IDs do not match, ours: "
- << _rsConfig.getReplicaSetId()
- << "; remote node's: "
- << replMetadata.getValue().getReplicaSetId());
+ responseStatus =
+ Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << "replica set IDs do not match, ours: " << _rsConfig.getReplicaSetId()
+ << "; remote node's: " << replMetadata.getValue().getReplicaSetId());
// Ignore metadata.
replMetadata = responseStatus;
}
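The heartbeat hunk above reflows a str::stream() message build without changing the idiom: the stream converts implicitly to the std::string that Status takes, and OID values stream into it directly, as the original code shows. A sketch under those assumptions; the helper name is hypothetical:

    // Hypothetical helper: mirrors the Status construction in the hunk above.
    mongo::Status makeReplicaSetIdMismatchStatus(const mongo::OID& ours,
                                                 const mongo::OID& remote) {
        return mongo::Status(mongo::ErrorCodes::InvalidReplicaSetConfig,
                             mongo::str::stream()
                                 << "replica set IDs do not match, ours: " << ours
                                 << "; remote node's: " << remote);
    }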
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 80ac8e0e2b7..3099894bcfb 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -90,17 +90,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -160,21 +157,18 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test,
ArbiterJoinsExistingReplSetWhenReceivingAConfigContainingTheArbiterViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion"
- << 1));
+ ReplSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -238,17 +232,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
@@ -323,9 +314,7 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -338,12 +327,12 @@ TEST_F(ReplCoordHBV1Test,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code"
- << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(
+ noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code" << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
@@ -364,15 +353,11 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
HostAndPort host2("node2:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host" << host2.toString()))
- << "settings"
- << BSON("replicaSetId" << OID::gen())
- << "protocolVersion"
+ << "settings" << BSON("replicaSetId" << OID::gen()) << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -444,10 +429,9 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
ASSERT_EQ(MemberState(MemberState::RS_DOWN).toString(),
MemberState(member["state"].numberInt()).toString());
ASSERT_EQ(member["lastHeartbeatMessage"].String(),
- std::string(str::stream() << "replica set IDs do not match, ours: "
- << rsConfig.getReplicaSetId()
- << "; remote node's: "
- << unexpectedId));
+ std::string(str::stream()
+ << "replica set IDs do not match, ours: " << rsConfig.getReplicaSetId()
+ << "; remote node's: " << unexpectedId));
}
TEST_F(ReplCoordHBV1Test,
@@ -455,19 +439,14 @@ TEST_F(ReplCoordHBV1Test,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -538,19 +517,14 @@ TEST_F(ReplCoordHBV1Test, LastCommittedOpTimeOnlyUpdatesFromHeartbeatInFCV42) {
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -620,19 +594,14 @@ TEST_F(ReplCoordHBV1Test, LastCommittedOpTimeOnlyUpdatesFromHeartbeatIfNotInStar
// if we are in STARTUP2.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
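The scheduleResponse() hunks in this file only re-wrap mocked command replies; the reply itself stays a plain command-error document. A sketch of that shape, assuming the BSON macros already in scope; the helper name is illustrative:

    // Illustrative helper: the unauthorized heartbeat reply the test schedules.
    // ErrorCodes values stream into BSON directly, as in the hunk above.
    mongo::BSONObj makeUnauthorizedReply() {
        return BSON("ok" << 0.0 << "errmsg"
                         << "unauth'd"
                         << "code" << mongo::ErrorCodes::Unauthorized);
    }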
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 3fad34dfe2a..738ff86ef87 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -73,9 +73,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -99,9 +97,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive uninitializable config
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -117,21 +113,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "invalidlyNamedField"
- << 3
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "invalidlyNamedField"
+ << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "arbiterOnly"
- << true)));
+ << "arbiterOnly" << true)));
const auto opCtx = makeOperationContext();
// ErrorCodes::BadValue should be propagated from ReplSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
@@ -143,9 +132,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -161,11 +148,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "notMySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -181,15 +164,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -201,17 +181,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()));
+ << "settings" << BSON("replicaSetId" << OID::gen()));
const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -224,9 +199,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, validate fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -242,11 +215,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -266,9 +235,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord,
replCoord->processReplSetInitiate(opCtx,
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -285,17 +252,12 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord,
// Replica set id will be copied from existing configuration.
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "priority"
- << 3)));
+ << "priority" << 3)));
*status = replCoord->processReplSetReconfig(opCtx, args, &garbage);
}
@@ -305,9 +267,7 @@ TEST_F(ReplCoordTest,
// containing a higher config version
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -348,9 +308,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
// start up, become primary, saving the config fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -377,9 +335,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, reconfig, then before that reconfig concludes, reconfig again
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -404,11 +360,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -443,11 +395,7 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -464,15 +412,12 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
// start up, become primary, reconfig successfully
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -512,9 +457,7 @@ TEST_F(
// from reconfig
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -538,11 +481,7 @@ TEST_F(
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -581,9 +520,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
// start up, become primary, reconfig, while reconfigging receive reconfig via heartbeat
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -614,9 +551,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "members"
+ << "version" << 4 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -652,9 +587,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -670,11 +603,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index f355d04bba0..c387e99bec6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -121,15 +121,12 @@ void killOperation(OperationContext* opCtx) {
TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
auto replCoord = getReplCoord();
@@ -163,9 +160,7 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -176,13 +171,10 @@ TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig)
TEST_F(ReplCoordTest, NodeEntersArbiterStateWhenStartingUpWithValidLocalConfigWhereItIsAnArbiter) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"))),
HostAndPort("node1", 12345));
@@ -194,9 +186,7 @@ TEST_F(ReplCoordTest, NodeEntersRemovedStateWhenStartingUpWithALocalConfigWhichL
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -213,9 +203,7 @@ TEST_F(ReplCoordTest,
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "notMySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -256,9 +244,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -272,9 +258,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result2));
@@ -297,9 +281,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
<< "arbiterOnly"
@@ -328,9 +310,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -348,9 +328,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node4"))),
&result));
@@ -364,9 +342,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 1 << "host"
@@ -461,9 +437,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "wrongSet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -492,8 +466,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithoutAn_
BSONObjBuilder result1;
auto status = getReplCoord()->processReplSetInitiate(
opCtx.get(),
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node1:12345"))),
+ BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
&result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "Missing expected field \"_id\"");
@@ -512,9 +487,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -535,9 +508,7 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -558,9 +529,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -612,13 +581,10 @@ TEST_F(
TEST_F(ReplCoordTest, NodeReturnsOkWhenCheckReplEnabledForCommandAfterReceivingAConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
// check status OK and result is empty
@@ -648,21 +614,16 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstASta
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASecondaryNode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -683,21 +644,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWithWTermOne) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
OpTimeWithTermOne time(100, 1);
@@ -725,25 +681,19 @@ TEST_F(ReplCoordTest,
NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWriteDurable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -805,25 +755,19 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWrite) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -882,9 +826,7 @@ TEST_F(ReplCoordTest,
NodeReturnsUnknownReplWriteConcernWhenAwaitReplicationReceivesAnInvalidWriteConcernMode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0")
<< BSON("_id" << 1 << "host"
@@ -921,9 +863,7 @@ TEST_F(
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0"
<< "tags"
@@ -1103,21 +1043,16 @@ private:
TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1167,21 +1102,16 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfie
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedWhenAWriteConcernTimesOutBeforeBeingSatisified) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1218,21 +1148,16 @@ TEST_F(ReplCoordTest,
NodeReturnsShutDownInProgressWhenANodeShutsDownPriorToSatisfyingAWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1268,21 +1193,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
// if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1316,9 +1236,7 @@ TEST_F(ReplCoordTest,
// Tests that a thread blocked in awaitReplication can be killed by a killOp operation
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1")
<< BSON("_id" << 1 << "host"
@@ -1436,9 +1354,7 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1463,9 +1379,7 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1499,33 +1413,27 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberOneAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberOneDurableWallTime)
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberTwoAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberTwoDurableWallTime)))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(
+ UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName << memberOneAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName << memberOneDurableWallTime)
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << memberTwoAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << memberTwoDurableWallTime)))));
ASSERT_OK(repl->processReplSetUpdatePosition(updatePositionArgs, &configVersion));
@@ -1547,17 +1455,14 @@ TEST_F(ReplCoordTest, ElectionIdTracksTermInPV1) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1611,17 +1516,14 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1666,17 +1568,14 @@ TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreTha
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1732,17 +1631,14 @@ TEST_F(ReplCoordTest, DrainCompletionMidStepDown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1804,12 +1700,9 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1819,9 +1712,7 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1906,17 +1797,12 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "test3:1234"))),
HostAndPort("test1", 1234));
@@ -1956,12 +1842,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupFirstSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1971,9 +1854,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1995,12 +1876,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupOtherSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -2010,9 +1888,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -2141,9 +2017,7 @@ private:
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2240,14 +2114,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetStepDownTimeoutAndElectionTimeoutExpiresA
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2276,14 +2146,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetUnfreeze) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 10000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2341,9 +2207,7 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2406,9 +2270,7 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2762,13 +2624,10 @@ TEST_F(ReplCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
}
@@ -2776,9 +2635,7 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2855,11 +2712,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2882,11 +2735,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2918,11 +2767,7 @@ TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintena
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2952,11 +2797,7 @@ TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackS
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2998,11 +2839,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -3037,11 +2874,7 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -3109,9 +2942,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3154,9 +2985,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3192,19 +3021,14 @@ TEST_F(ReplCoordTest, NodeReturnsNoNodesWhenGetOtherNodesInReplSetIsRunBeforeHav
TEST_F(ReplCoordTest, NodeReturnsListOfNodesOtherThanItselfInResponseToGetOtherNodesInReplSet) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h1")
<< BSON("_id" << 1 << "host"
<< "h2")
<< BSON("_id" << 2 << "host"
<< "h3"
- << "priority"
- << 0
- << "hidden"
- << true))),
+ << "priority" << 0 << "hidden" << true))),
HostAndPort("h1"));
std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
@@ -3242,9 +3066,7 @@ TEST_F(ReplCoordTest, IsMaster) {
BSON(
"_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
<< BSON("_id" << 1 << "host" << h2.toString())
<< BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
@@ -3307,9 +3129,7 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3339,9 +3159,7 @@ TEST_F(ReplCoordTest, IsMasterInShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3377,21 +3195,16 @@ TEST_F(ReplCoordTest, LogAMessageWhenShutDownBeforeReplicationStartUpFinished) {
TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3418,18 +3231,13 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 0
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 0
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3441,21 +3249,16 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3476,18 +3279,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 3
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 3 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3504,21 +3302,16 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3539,18 +3332,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 9
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 9
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3567,21 +3355,16 @@ TEST_F(ReplCoordTest,
ProcessUpdateWhenUpdatePositionContainsOnlyConfigVersionAndMemberIdsWithoutRIDs) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3605,32 +3388,26 @@ TEST_F(ReplCoordTest,
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
auto opCtx = makeOperationContext();
@@ -3651,15 +3428,10 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "node2:12345")
<< BSON("_id" << 2 << "host"
@@ -3670,21 +3442,16 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3751,11 +3518,7 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -3768,21 +3531,16 @@ TEST_F(
NodeReturnsUnsatisfiableWriteConcernWhenReconfiggingToAClusterThatCannotSatisfyTheWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 2), Date_t() + Seconds(100));
@@ -3829,29 +3587,22 @@ TEST_F(ReplCoordTest,
NodeReturnsOKFromAwaitReplicationWhenReconfiggingToASetWhereMajorityIsSmallerAndSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))),
+ << "_id" << 4))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3912,35 +3663,22 @@ TEST_F(ReplCoordTest,
// satisfied by voting data-bearing members.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 1), 1);
@@ -3980,35 +3718,22 @@ TEST_F(ReplCoordTest,
// Test that the commit level advances properly.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime zero(Timestamp(0, 0), 0);
@@ -4240,11 +3965,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4308,11 +4029,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorageDisableMajori
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4345,11 +4062,7 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4407,15 +4120,11 @@ TEST_F(StableOpTimeTest, ClearOpTimeCandidatesPastCommonPointAfterRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))
- << "protocolVersion"
- << 1),
+ << "_id" << 0))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
auto repl = getReplCoord();
@@ -4511,13 +4220,10 @@ TEST_F(StableOpTimeTest, OpTimeCandidatesAreNotAddedWhenStateIsNotConsistent) {
TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDuringShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4536,13 +4242,10 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4560,13 +4263,10 @@ TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupte
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4577,13 +4277,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimePriorToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -4599,13 +4296,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimeEqualToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -4646,13 +4340,10 @@ TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunAgainstAS
TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4672,13 +4363,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
const auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4695,13 +4383,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4717,13 +4402,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4739,13 +4421,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4767,13 +4446,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4797,13 +4473,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
TEST_F(ReplCoordTest, WaitUntilOpTimeforReadRejectsUnsupportedMajorityReadConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4829,21 +4502,16 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
// Ensure that we do not process ReplSetMetadata when ConfigVersions do not match.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4851,35 +4519,20 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 1
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 1
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
// higher configVersion
- StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 100
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(
+ BSON(rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 100
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
}
@@ -4889,23 +4542,17 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
// but not if the OpTime is older than the current LastCommittedOpTime.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4935,23 +4582,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// Ensure that currentPrimaryIndex is never altered by ReplSetMetadata.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -4962,17 +4603,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 2
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 2 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4982,17 +4615,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -5002,17 +4627,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata3 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata3.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -5024,19 +4641,14 @@ TEST_F(ReplCoordTest,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -5048,19 +4660,12 @@ TEST_F(ReplCoordTest,
// Higher term - should update term but not last committed optime.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5090,19 +4695,14 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastAppliedIgnoringTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5120,23 +4720,17 @@ TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastApp
TEST_F(ReplCoordTest, PrepareOplogQueryMetadata) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5184,21 +4778,14 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "arbiterOnly"
- << true)
+ << "_id" << 0 << "arbiterOnly" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -5211,19 +4798,12 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Higher term - should update term and lastCommittedOpTime since arbiters learn of the
// commit point via heartbeats.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 1) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5254,19 +4834,13 @@ TEST_F(ReplCoordTest,
ScheduleElectionToBeRunInElectionTimeoutFromNowWhenCancelAndRescheduleElectionTimeoutIsRun) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5305,19 +4879,13 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5336,23 +4904,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileUnelectable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "_id" << 0 << "priority" << 0 << "hidden" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5367,19 +4925,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileRemoved) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5405,15 +4957,10 @@ TEST_F(ReplCoordTest,
config
.initialize(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
+ << "protocolVersion" << 1 << "version" << 3 << "members"
<< BSON_ARRAY(BSON("host"
<< "node2:12345"
- << "_id"
- << 1))))
+ << "_id" << 1))))
.transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
@@ -5437,19 +4984,13 @@ TEST_F(ReplCoordTest,
RescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInSameTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5494,19 +5035,13 @@ TEST_F(ReplCoordTest,
DontRescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInDiffertTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5548,19 +5083,13 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseWithoutState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5600,9 +5129,7 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5636,9 +5163,7 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5668,9 +5193,7 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5693,13 +5216,10 @@ TEST_F(ReplCoordTest,
NodeChangesMyLastOpTimeWhenAndOnlyWhensetMyLastDurableOpTimeReceivesANewerOpTime4DurableSE) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5723,13 +5243,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5749,13 +5266,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5775,13 +5289,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5801,13 +5312,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5826,18 +5334,14 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -5900,18 +5404,14 @@ TEST_F(ReplCoordTest, UpdatePositionCmdHasMetadata) {
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -5941,32 +5441,23 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))
- << "protocolVersion"
- << 1
- << "settings"
+ << "_id" << 4))
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5978,57 +5469,42 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args;
ASSERT_OK(updatePositionArgsInitialize(
args,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 3
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 4
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, nullptr));
// Become PRIMARY.
@@ -6038,33 +5514,26 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args1;
ASSERT_OK(updatePositionArgsInitialize(
args1,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())))),
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())))),
/*requireWallTime*/ true));
const Date_t startDate = getNet()->now();
getNet()->enterNetwork();
@@ -6106,20 +5575,16 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
ASSERT_OK(updatePositionArgsInitialize(
args2,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, nullptr));
hbArgs.setSetName("mySet");
@@ -6149,9 +5614,7 @@ TEST_F(ReplCoordTest, WaitForMemberState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6186,9 +5649,7 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6227,13 +5688,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6250,13 +5708,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << true),
+ << "writeConcernMajorityJournalDefault" << true),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6271,13 +5726,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfSync
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6299,13 +5751,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6322,21 +5771,16 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6349,15 +5793,9 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6376,21 +5814,16 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6403,15 +5836,9 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << true
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << true << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6432,17 +5859,13 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6457,18 +5880,12 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
// Simulate a failure to write the 'last vote' document. The specific error code isn't
@@ -6493,17 +5910,13 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6518,18 +5931,12 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
getReplCoord()->enterTerminalShutdown();
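
For reference, the vote-request documents these tests construct all share one shape. A hedged sketch — the helper name is hypothetical; the field names are taken verbatim from the hunks above:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/repl/optime.h"

namespace mongo {
namespace repl {

// Builds the replSetRequestVotes command document handed to
// ReplSetRequestVotesArgs::initialize() in the tests above.
BSONObj makeRequestVotesCmd(long long term,
                            long long candidateIndex,
                            long long configVersion,
                            bool dryRun,
                            const OpTime& lastCommittedOp) {
    return BSON("replSetRequestVotes" << 1 << "setName"
                                      << "mySet"
                                      << "term" << term << "candidateIndex" << candidateIndex
                                      << "configVersion" << configVersion << "dryRun" << dryRun
                                      << "lastCommittedOp" << lastCommittedOp.toBSON());
}

}  // namespace repl
}  // namespace mongo
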
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 058d841d597..a2e5e2d9e8a 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -277,14 +277,13 @@ void ReplCoordTest::simulateSuccessfulDryRun(
if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
ASSERT_TRUE(request.cmdObj.getBoolField("dryRun"));
onDryRunRequest(request);
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
voteRequests++;
} else if (consumeHeartbeatV1(noi)) {
// The heartbeat has been consumed.
@@ -346,14 +345,13 @@ void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t e
hbResp.setConfigVersion(rsConfig.getConfigVersion());
net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 7a1fcbd0e0f..c20aa0c87d4 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -67,10 +67,10 @@ namespace mongo {
MONGO_FAIL_POINT_DEFINE(waitInIsMaster);
-using std::unique_ptr;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
namespace repl {
namespace {
@@ -330,8 +330,7 @@ public:
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unrecognized field of 'internalClient': '"
- << fieldName
- << "'");
+ << fieldName << "'");
}
}
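
The replication_info.cpp hunk is the same re-wrap applied to a `str::stream()` chain. A small sketch of the idiom (the include path is an assumption for this era of the tree):

#include <string>

#include "mongo/util/str.h"

namespace mongo {

// str::stream() accumulates the chained << pieces and converts implicitly to
// std::string, so re-wrapping the chain cannot change the message produced.
std::string unrecognizedInternalClientField(const std::string& fieldName) {
    return str::stream() << "Unrecognized field of 'internalClient': '" << fieldName << "'";
}

}  // namespace mongo
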
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index b7e57eb1742..6b533d42cb1 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -398,8 +398,7 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
invariant(applyThroughOpTime.getTimestamp() == topOfOplog,
str::stream() << "Did not apply to top of oplog. Applied through: "
<< applyThroughOpTime.toString()
- << ". Top of oplog: "
- << topOfOplog.toString());
+ << ". Top of oplog: " << topOfOplog.toString());
oplogBuffer.shutdown(opCtx);
// We may crash before setting appliedThrough. If we have a stable checkpoint, we will recover
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index f8e65db8228..84d31b48a9a 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -1125,9 +1125,7 @@ TEST_F(ReplicationRecoveryTest, CommitTransactionOplogEntryCorrectlyUpdatesConfi
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
@@ -1202,9 +1200,7 @@ TEST_F(ReplicationRecoveryTest,
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index de8b60edfa4..f4538c6c6fa 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -380,8 +380,7 @@ TEST_F(ReporterTestNoTriggerAtSetUp,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << 100));
+ << "configVersion" << 100));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -400,8 +399,7 @@ TEST_F(ReporterTest, InvalidReplicaSetResponseWithSameConfigVersionOnSyncTargetS
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "invalid config"
- << "configVersion"
- << posUpdater->getConfigVersion()));
+ << "configVersion" << posUpdater->getConfigVersion()));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -417,8 +415,7 @@ TEST_F(ReporterTest,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << posUpdater->getConfigVersion() + 1));
+ << "configVersion" << posUpdater->getConfigVersion() + 1));
ASSERT_TRUE(reporter->isActive());
}
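
The simulated error responses in reporter_test.cpp likewise share one layout; a sketch with a hypothetical helper name, fields mirroring the hunks above:

#include <string>

#include "mongo/base/error_codes.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {

// Shape of the InvalidReplicaSetConfig responses fed to
// processNetworkResponse() in the tests above.
BSONObj makeInvalidConfigResponse(const std::string& errmsg, long long configVersion) {
    return BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig) << "errmsg"
                     << errmsg << "configVersion" << configVersion);
}

}  // namespace mongo
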
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index 09047074164..1e5b102a595 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -123,14 +123,11 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollBackLocalOperations
auto result = _localOplogIterator->next();
if (!result.isOK()) {
return Status(ErrorCodes::NoMatchingDocument,
- str::stream() << "reached beginning of local oplog: {"
- << "scanned: "
- << _scanned
- << ", theirTime: "
- << getTimestamp(operation).toString()
- << ", ourTime: "
- << getTimestamp(_localOplogValue).toString()
- << "}");
+ str::stream()
+ << "reached beginning of local oplog: {"
+ << "scanned: " << _scanned
+ << ", theirTime: " << getTimestamp(operation).toString()
+ << ", ourTime: " << getTimestamp(_localOplogValue).toString() << "}");
}
opAfterCurrentEntry = _localOplogValue.first;
_localOplogValue = result.getValue();
@@ -200,11 +197,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperat
}
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "reached beginning of remote oplog: {"
- << "them: "
- << remoteOplog.toString()
- << ", theirTime: "
- << theirTime.toString()
- << "}");
+ << "them: " << remoteOplog.toString()
+ << ", theirTime: " << theirTime.toString() << "}");
}
} // namespace repl
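
A sketch of the Status-construction pattern reformatted above (argument types simplified to std::string/Timestamp to keep it self-contained):

#include <string>

#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
#include "mongo/util/str.h"

namespace mongo {

// The str::stream() chain converts to std::string and becomes the Status
// reason; the reformat only changes where the chain wraps.
Status reachedBeginningOfRemoteOplog(const std::string& remoteOplog, Timestamp theirTime) {
    return Status(ErrorCodes::NoMatchingDocument,
                  str::stream() << "reached beginning of remote oplog: {"
                                << "them: " << remoteOplog
                                << ", theirTime: " << theirTime.toString() << "}");
}

}  // namespace mongo
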
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 1f8a933b67c..67fff417d0a 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -50,26 +50,18 @@ BSONObj makeOp(long long seconds, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
BSONObj makeOpWithWallClockTime(long count, long wallClockMillis, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(count, count) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid
- << "wall"
- << Date_t::fromMillisSinceEpoch(wallClockMillis));
+ << "ui" << uuid << "wall" << Date_t::fromMillisSinceEpoch(wallClockMillis));
};
int recordId = 0;
@@ -150,7 +142,8 @@ TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -175,7 +168,10 @@ TEST(RollBackLocalOperationsTest, EndOfLocalOplog) {
TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(5), makeOpAndRecordId(4), makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(5),
+ makeOpAndRecordId(4),
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
@@ -209,7 +205,8 @@ TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
TEST(RollBackLocalOperationsTest, SameTimestampDifferentTermsRollbackNoSuchKey) {
auto commonOperation = makeOpAndRecordId(1, 1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2, 3), commonOperation,
+ makeOpAndRecordId(2, 3),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -242,7 +239,9 @@ TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
auto commonOperation = makeOpWithWallClockTimeAndRecordId(1, 1 * 5000);
auto firstOpAfterCommonPoint = makeOpWithWallClockTimeAndRecordId(2, 2 * 60 * 60 * 24 * 1000);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(3), firstOpAfterCommonPoint, commonOperation,
+ makeOpAndRecordId(3),
+ firstOpAfterCommonPoint,
+ commonOperation,
});
auto i = localOperations.cbegin();
auto result = syncRollBackLocalOperations(OplogInterfaceMock(localOperations),
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index b6aca140721..73c484ec452 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -100,8 +100,9 @@ boost::optional<long long> _parseDroppedCollectionCount(const OplogEntry& oplogE
auto obj2 = oplogEntry.getObject2();
if (!obj2) {
- warning() << "Unable to get collection count from " << desc << " without the o2 "
- "field. oplog op: "
+ warning() << "Unable to get collection count from " << desc
+ << " without the o2 "
+ "field. oplog op: "
<< redact(oplogEntry.toBSON());
return boost::none;
}
@@ -324,10 +325,10 @@ Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
auto status =
_replicationCoordinator->setFollowerModeStrict(opCtx, MemberState::RS_ROLLBACK);
if (!status.isOK()) {
- status.addContext(str::stream() << "Cannot transition from "
- << _replicationCoordinator->getMemberState().toString()
- << " to "
- << MemberState(MemberState::RS_ROLLBACK).toString());
+ status.addContext(str::stream()
+ << "Cannot transition from "
+ << _replicationCoordinator->getMemberState().toString() << " to "
+ << MemberState(MemberState::RS_ROLLBACK).toString());
log() << status;
return status;
}
@@ -416,9 +417,9 @@ StatusWith<std::set<NamespaceString>> RollbackImpl::_namespacesForOp(const Oplog
// These commands do not need to be supported by rollback. 'convertToCapped' should
// always be converted to lower level DDL operations, and 'emptycapped' is a
// testing-only command.
- std::string message = str::stream() << "Encountered unsupported command type '"
- << firstElem.fieldName()
- << "' during rollback.";
+ std::string message = str::stream()
+ << "Encountered unsupported command type '" << firstElem.fieldName()
+ << "' during rollback.";
return Status(ErrorCodes::UnrecoverableRollbackError, message);
}
case OplogEntry::CommandType::kCreate:
@@ -594,8 +595,7 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) {
auto collToScan = autoCollToScan.getCollection();
invariant(coll == collToScan,
str::stream() << "Catalog returned invalid collection: " << nss.ns() << " ("
- << uuid.toString()
- << ")");
+ << uuid.toString() << ")");
auto exec = collToScan->makePlanExecutor(
opCtx, PlanExecutor::INTERRUPT_ONLY, Collection::ScanDirection::kForward);
long long countFromScan = 0;
@@ -816,8 +816,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
const auto uuid = oplogEntry.getUuid().get();
invariant(_countDiffs.find(uuid) == _countDiffs.end(),
str::stream() << "Unexpected existing count diff for " << uuid.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -843,10 +842,9 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
<< "Oplog entry to roll back is unexpectedly missing dropTarget UUID: "
<< redact(oplogEntry.toBSON()));
invariant(_countDiffs.find(dropTargetUUID) == _countDiffs.end(),
- str::stream() << "Unexpected existing count diff for "
- << dropTargetUUID.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ str::stream()
+ << "Unexpected existing count diff for " << dropTargetUUID.toString()
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -1012,9 +1010,7 @@ Status RollbackImpl::_checkAgainstTimeLimit(
if (diff > timeLimit) {
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << "not willing to roll back more than " << timeLimit
- << " seconds of data. Have: "
- << diff
- << " seconds.");
+ << " seconds of data. Have: " << diff << " seconds.");
}
} else {
@@ -1044,8 +1040,7 @@ Timestamp RollbackImpl::_findTruncateTimestamp(
invariant(commonPointTime.getStatus());
invariant(commonPointTime.getValue() == commonPointOpTime,
str::stream() << "Common point: " << commonPointOpTime.toString()
- << ", record found: "
- << commonPointTime.getValue().toString());
+ << ", record found: " << commonPointTime.getValue().toString());
// Get the next document, which will be the first document to truncate.
auto truncatePointRecord = oplogCursor->next();
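The status and invariant messages being reflowed in this file are all built with str::stream(), which accumulates operator<< arguments and converts implicitly to std::string. A simplified, self-contained stand-in (the real helper is mongo's str::stream; this sketch only illustrates the mechanism):

#include <sstream>
#include <string>

// Accumulate heterogeneous values via operator<< and convert implicitly to
// std::string, so the whole chain can be passed where a string is expected,
// e.g. as a Status reason or an invariant message.
class StreamToString {
public:
    template <typename T>
    StreamToString& operator<<(const T& value) {
        _ss << value;
        return *this;
    }
    operator std::string() const {
        return _ss.str();
    }

private:
    std::ostringstream _ss;
};

// Usage mirroring the hunks above:
//   std::string msg = StreamToString() << "Common point: " << a << ", record found: " << b;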
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index ef1e1985cb5..00ca82d3fef 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -285,7 +285,7 @@ public:
virtual const std::vector<BSONObj>& docsDeletedForNamespace_forTest(UUID uuid) const& {
MONGO_UNREACHABLE;
}
- void docsDeletedForNamespace_forTest(UUID)&& = delete;
+ void docsDeletedForNamespace_forTest(UUID) && = delete;
protected:
/**
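The only change in this header adds a space before the ref-qualifier, but the construct is worth a gloss: the lvalue-qualified accessor returns a reference into the object, and deleting the rvalue-qualified overload stops callers from taking that reference off a temporary. A self-contained sketch of the same pattern:

#include <vector>

struct Example {
    // Lvalue-qualified: the object outlives the returned reference.
    const std::vector<int>& docs() const& {
        return _docs;
    }
    // Rvalue-qualified overload deleted: Example{}.docs() would hand back a
    // reference into a temporary that dies at the end of the expression.
    void docs() && = delete;

    std::vector<int> _docs;
};

int main() {
    Example e;
    const auto& d = e.docs();               // OK: e is an lvalue.
    // const auto& bad = Example{}.docs();  // does not compile: deleted.
    return static_cast<int>(d.size());
}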
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index 77a28bfbe1c..e7fa22eef19 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -69,37 +69,21 @@ std::string kGenericUUIDStr = "b4c66a44-c1ca-4d86-8d25-12e82fa2de5b";
BSONObj makeInsertOplogEntry(long long time, BSONObj obj, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "i"
- << "o"
- << obj
- << "ns"
- << ns
- << "ui"
- << uuid);
+ << "o" << obj << "ns" << ns << "ui" << uuid);
}
BSONObj makeUpdateOplogEntry(
long long time, BSONObj query, BSONObj update, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "u"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o2"
- << query
- << "o"
+ << "ns" << ns << "ui" << uuid << "o2" << query << "o"
<< BSON("$set" << update));
}
BSONObj makeDeleteOplogEntry(long long time, BSONObj id, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "d"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o"
- << id);
+ << "ns" << ns << "ui" << uuid << "o" << id);
}
class RollbackImplForTest final : public RollbackImpl {
@@ -380,12 +364,7 @@ BSONObj makeOp(OpTime time) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << time.getTimestamp() << "t" << time.getTerm() << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
- << nss.ns()
- << "ui"
- << kGenericUUID);
+ << "o" << BSONObj() << "ns" << nss.ns() << "ui" << kGenericUUID);
}
BSONObj makeOp(int count) {
@@ -400,13 +379,9 @@ auto makeOpWithWallClockTime(long count, long wallClockMillis) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << Timestamp(count, count) << "t" << (long long)count << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "top"
- << "ui"
- << kGenericUUID
- << "wall"
+ << "ui" << kGenericUUID << "wall"
<< Date_t::fromMillisSinceEpoch(wallClockMillis));
};
@@ -955,14 +930,10 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteRollbackFilesIfNoInsertsOrUpdatesAf
const auto uuid = UUID::gen();
const auto nss = NamespaceString("db.coll");
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1183,12 +1154,7 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenInsertsAndDropOfCollectio
const auto oplogEntry =
BSON("ts" << dropOpTime.getTimestamp() << "t" << dropOpTime.getTerm() << "op"
<< "c"
- << "o"
- << BSON("drop" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ << "o" << BSON("drop" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1213,14 +1179,10 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCreateCollAndInsertsAreRo
const auto nss = NamespaceString("db.people");
const auto uuid = UUID::gen();
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
// Insert documents into the collection.
@@ -1584,14 +1546,14 @@ public:
void assertRollbackInfoContainsObjectForUUID(UUID uuid, BSONObj bson) {
const auto& uuidToIdMap = _rbInfo.rollbackDeletedIdsMap;
auto search = uuidToIdMap.find(uuid);
- ASSERT(search != uuidToIdMap.end()) << "map is unexpectedly missing an entry for uuid "
- << uuid.toString() << " containing object "
- << bson.jsonString();
+ ASSERT(search != uuidToIdMap.end())
+ << "map is unexpectedly missing an entry for uuid " << uuid.toString()
+ << " containing object " << bson.jsonString();
const auto& idObjSet = search->second;
const auto iter = idObjSet.find(bson);
- ASSERT(iter != idObjSet.end()) << "_id object set is unexpectedly missing object "
- << bson.jsonString() << " in namespace with uuid "
- << uuid.toString();
+ ASSERT(iter != idObjSet.end())
+ << "_id object set is unexpectedly missing object " << bson.jsonString()
+ << " in namespace with uuid " << uuid.toString();
}
@@ -1675,12 +1637,12 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropColl
TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateIndexOplogEntry) {
auto nss = NamespaceString("test", "coll");
- auto indexObj = BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
- << static_cast<int>(IndexDescriptor::IndexVersion::kV2)
- << "key"
- << "x"
- << "name"
- << "x_1");
+ auto indexObj =
+ BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
+ << static_cast<int>(IndexDescriptor::IndexVersion::kV2) << "key"
+ << "x"
+ << "name"
+ << "x_1");
auto cmdOp =
makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString(), indexObj, 2);
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index aecd404f865..7d893f0c4fb 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -107,9 +107,7 @@ StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfoByUUID(const std::strin
return StatusWith<BSONObj>(ErrorCodes::NoSuchKey,
str::stream()
<< "No collection info found for collection with uuid: "
- << uuid.toString()
- << " in db: "
- << db);
+ << uuid.toString() << " in db: " << db);
}
invariant(info.size() == 1U);
return info.front();
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index a157dcf76df..b5824819380 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -295,12 +295,9 @@ void RollbackResyncsCollectionOptionsTest::resyncCollectionOptionsTest(
auto commonOpUuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
auto commonOpBson = BSON("ts" << Timestamp(1, 1) << "t" << 1LL << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rollback_test.test"
- << "ui"
- << commonOpUuid);
+ << "ui" << commonOpUuid);
auto commonOperation = std::make_pair(commonOpBson, RecordId(1));
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index a94dc1c42fb..491ace5de8b 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -83,13 +83,13 @@
namespace mongo {
-using std::shared_ptr;
-using std::unique_ptr;
using std::list;
using std::map;
+using std::pair;
using std::set;
+using std::shared_ptr;
using std::string;
-using std::pair;
+using std::unique_ptr;
namespace repl {
@@ -199,10 +199,10 @@ Status FixUpInfo::recordDropTargetInfo(const BSONElement& dropTarget,
OpTime opTime) {
StatusWith<UUID> dropTargetUUIDStatus = UUID::parse(dropTarget);
if (!dropTargetUUIDStatus.isOK()) {
- std::string message = str::stream() << "Unable to roll back renameCollection. Cannot parse "
- "dropTarget UUID. Returned status: "
- << redact(dropTargetUUIDStatus.getStatus())
- << ", oplog entry: " << redact(obj);
+ std::string message = str::stream()
+ << "Unable to roll back renameCollection. Cannot parse "
+ "dropTarget UUID. Returned status: "
+ << redact(dropTargetUUIDStatus.getStatus()) << ", oplog entry: " << redact(obj);
error() << message;
return dropTargetUUIDStatus.getStatus();
}
@@ -227,8 +227,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
// Checks that the oplog entry is smaller than 512 MB. We do not roll back if the
// oplog entry is larger than 512 MB.
if (ourObj.objsize() > 512 * 1024 * 1024)
- throw RSFatalException(str::stream() << "Rollback too large, oplog size: "
- << ourObj.objsize());
+ throw RSFatalException(str::stream()
+ << "Rollback too large, oplog size: " << ourObj.objsize());
// If required fields are not present in the BSONObj for an applyOps entry, create these fields
// and populate them with dummy values before parsing ourObj as an oplog entry.
@@ -1235,8 +1235,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// is rolled back upstream and we restart, we expect to still have the
// collection.
- log() << nss->ns() << " not found on remote host, so we do not roll back collmod "
- "operation. Instead, we will drop the collection soon.";
+            log() << nss->ns()
+                  << " not found on remote host, so we do not roll back the collmod "
+ "operation. Instead, we will drop the collection soon.";
continue;
}
@@ -1246,18 +1247,18 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// Updates the collection flags.
if (auto optionsField = info["options"]) {
if (optionsField.type() != Object) {
- throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": expected 'options' to be an "
- << "Object, got "
- << typeName(optionsField.type()));
+ throw RSFatalException(str::stream()
+ << "Failed to parse options " << info
+ << ": expected 'options' to be an "
+ << "Object, got " << typeName(optionsField.type()));
}
auto statusWithCollectionOptions = CollectionOptions::parse(
optionsField.Obj(), CollectionOptions::parseForCommand);
if (!statusWithCollectionOptions.isOK()) {
- throw RSFatalException(
- str::stream() << "Failed to parse options " << info << ": "
- << statusWithCollectionOptions.getStatus().toString());
+ throw RSFatalException(str::stream()
+ << "Failed to parse options " << info << ": "
+ << statusWithCollectionOptions.getStatus().toString());
}
options = statusWithCollectionOptions.getValue();
} else {
@@ -1275,13 +1276,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto validatorStatus = collection->updateValidator(
opCtx, options.validator, options.validationLevel, options.validationAction);
if (!validatorStatus.isOK()) {
- throw RSFatalException(
- str::stream() << "Failed to update validator for " << nss->toString() << " ("
- << uuid
- << ") with "
- << redact(info)
- << ". Got: "
- << validatorStatus.toString());
+ throw RSFatalException(str::stream()
+ << "Failed to update validator for " << nss->toString()
+ << " (" << uuid << ") with " << redact(info)
+ << ". Got: " << validatorStatus.toString());
}
wuow.commit();
@@ -1371,8 +1369,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
<< " to archive file: " << redact(status);
throw RSFatalException(str::stream()
<< "Rollback cannot write document in namespace "
- << nss->ns()
- << " to archive file.");
+ << nss->ns() << " to archive file.");
}
} else {
error() << "Rollback cannot find object: " << pattern << " in namespace "
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index a49055ba339..3d12a5bac28 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -81,21 +81,16 @@ OplogInterfaceMock::Operation makeDropIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName
+ << "v" << static_cast<int>(kIndexVersion));
return std::make_pair(
BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
- << "ui"
- << collection->uuid()
- << "ns"
+ << "ui" << collection->uuid() << "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
- << "o2"
- << indexSpec),
+ << "o" << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
+ << "o2" << indexSpec),
RecordId(time));
}
@@ -103,22 +98,15 @@ OplogInterfaceMock::Operation makeCreateIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << key
- << "name"
- << indexName);
+ auto indexSpec = BSON(
+ "createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
+ << static_cast<int>(kIndexVersion) << "key" << key << "name" << indexName);
return std::make_pair(BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "ui"
- << collection->uuid()
- << "o"
- << indexSpec),
+ << "ui" << collection->uuid() << "o" << indexSpec),
RecordId(time));
}
@@ -140,11 +128,7 @@ OplogInterfaceMock::Operation makeRenameCollectionOplogEntry(const NamespaceStri
}
return std::make_pair(BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm() << "op"
<< "c"
- << "ui"
- << collectionUUID
- << "ns"
- << renameFrom.ns()
- << "o"
+ << "ui" << collectionUUID << "ns" << renameFrom.ns() << "o"
<< obj),
RecordId(opTime.getTimestamp().getSecs()));
}
@@ -153,12 +137,9 @@ BSONObj makeOp(long long seconds) {
auto uuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << seconds << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rs_rollback.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
int recordId = 0;
@@ -294,12 +275,9 @@ int _testRollbackDelete(OperationContext* opCtx,
auto commonOperation = makeOpAndRecordId(1);
auto deleteOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "d"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0)),
+ << "o" << BSON("_id" << 0)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -421,12 +399,9 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
auto commonOperation = makeOpAndRecordId(1);
auto insertDocumentOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("a" << 1)),
+ << "o" << BSON("a" << 1)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -465,8 +440,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
NamespaceString nss("test", "coll");
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("a" << 1)
- << "name"
+ << BSON("a" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -490,13 +464,11 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
_coordinator,
_replicationProcess.get()));
stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ ASSERT_EQUALS(
+ 1,
+ countLogLinesContaining(str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
{
Lock::DBLock dbLock(_opCtx.get(), nss.db(), MODE_S);
auto indexCatalog = collection->getIndexCatalog();
@@ -512,9 +484,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
auto collection = _createCollection(_opCtx.get(), "test.t", options);
auto indexSpec = BSON("ns"
<< "test.t"
- << "key"
- << BSON("a" << 1)
- << "name"
+ << "key" << BSON("a" << 1) << "name"
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
@@ -663,9 +633,7 @@ TEST_F(RSRollbackTest, RollingBackCreateIndexAndRenameWithLongName) {
auto longName = std::string(115, 'a');
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
- << longName);
+ << BSON("b" << 1) << "name" << longName);
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -713,8 +681,7 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
+ << BSON("b" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -745,19 +712,15 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
ASSERT(indexCatalog);
ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_opCtx.get()));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Created index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Created index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
std::vector<const IndexDescriptor*> indexes;
indexCatalog->findIndexesByKeyPattern(_opCtx.get(), BSON("a" << 1), false, &indexes);
ASSERT(indexes.size() == 1);
@@ -779,20 +742,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "t"
<< "ns"
<< "test.t"
- << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("a" << 1));
-
- auto createIndexOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
- << "c"
- << "ns"
- << "test.$cmd"
- << "ui"
- << collection->uuid()
- << "o"
- << command),
- RecordId(2));
+ << "v" << static_cast<int>(kIndexVersion) << "key" << BSON("a" << 1));
+
+ auto createIndexOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
+ << "c"
+ << "ns"
+ << "test.$cmd"
+ << "ui" << collection->uuid() << "o" << command),
+ RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
@@ -822,9 +780,7 @@ std::string idxName(std::string id) {
// Create an index spec object given the namespace and the index 'id'.
BSONObj idxSpec(NamespaceString nss, std::string id) {
return BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey(id) << 1)
- << "name"
- << idxName(id));
+ << BSON(idxKey(id) << 1) << "name" << idxName(id));
}
// Returns the number of indexes that exist on the given collection.
@@ -947,9 +903,7 @@ TEST_F(RSRollbackTest, RollbackCreateDropRecreateIndexOnCollection) {
// Create the necessary indexes. Index 0 is created, dropped, and created again in the
// sequence of ops, so we create that index.
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey("0") << 1)
- << "name"
- << idxName("0"));
+ << BSON(idxKey("0") << 1) << "name" << idxName("0"));
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), coll, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -984,9 +938,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto unknownCommandOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
<< "o"
<< BSON("convertToCapped"
@@ -1020,9 +972,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << coll->uuid()
- << "ns"
+ << "ui" << coll->uuid() << "ns"
<< "test.t"
<< "o"
<< BSON("drop"
@@ -1344,9 +1294,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionThenRenameCollectionToDroppedCollec
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << droppedCollectionUUID
- << "ns"
+ << "ui" << droppedCollectionUUID << "ns"
<< "test.x"
<< "o"
<< BSON("drop"
@@ -1416,16 +1364,15 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionThenCreateNewCollectionWithOldNam
false,
OpTime(Timestamp(2, 0), 5));
- auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
- << "c"
- << "ui"
- << createdCollectionUUID
- << "ns"
- << "test.x"
- << "o"
- << BSON("create"
- << "x")),
- RecordId(3));
+ auto createCollectionOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
+ << "c"
+ << "ui" << createdCollectionUUID << "ns"
+ << "test.x"
+ << "o"
+ << BSON("create"
+ << "x")),
+ RecordId(3));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
@@ -1466,9 +1413,7 @@ TEST_F(RSRollbackTest, RollbackCollModCommandFailsIfRBIDChangesWhileSyncingColle
auto commonOperation = makeOpAndRecordId(1);
auto collModOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid()
- << "ns"
+ << "ui" << coll->uuid() << "ns"
<< "test.t"
<< "o"
<< BSON("collMod"
@@ -1512,8 +1457,7 @@ TEST_F(RSRollbackTest, RollbackDropDatabaseCommand) {
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropDatabase" << 1)),
+ << "o" << BSON("dropDatabase" << 1)),
RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -1581,93 +1525,47 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
UUID uuid = coll->uuid();
const auto commonOperation = makeOpAndRecordId(1);
const auto applyOpsOperation =
- std::make_pair(makeApplyOpsOplogEntry(Timestamp(Seconds(2), 0),
- {BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 1)
- << "o"
- << BSON("_id" << 1 << "v" << 2)),
- BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(2, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 2)
- << "o"
- << BSON("_id" << 2 << "v" << 4)),
- BSON("op"
- << "d"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(3, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 3)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- // applyOps internal oplog entries are not required
- // to have a timestamp.
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4))}),
+ std::make_pair(makeApplyOpsOplogEntry(
+ Timestamp(Seconds(2), 0),
+ {BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(1, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 1) << "o"
+ << BSON("_id" << 1 << "v" << 2)),
+ BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(2, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 2) << "o"
+ << BSON("_id" << 2 << "v" << 4)),
+ BSON("op"
+ << "d"
+ << "ui" << uuid << "ts" << Timestamp(3, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 3)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ // applyOps internal oplog entries are not required
+ // to have a timestamp.
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4))}),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
@@ -1735,9 +1633,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid()
- << "ns"
+ << "ui" << coll->uuid() << "ns"
<< "test.t"
<< "o"
<< BSON("create"
@@ -1965,31 +1861,19 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< ""
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -1998,31 +1882,19 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSONObj());
+ << "o" << BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -2031,16 +1903,10 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion") {
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false)
@@ -2050,34 +1916,20 @@ DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion"
TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
const auto validOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSON("_id" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
@@ -2087,12 +1939,9 @@ TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutSessionIdIsFatal, "invariant") {
auto validOplogEntry = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
@@ -2113,18 +1962,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutTxnTableUUIDIsFatal) {
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
FixUpInfo fui;
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
@@ -2138,12 +1979,9 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
// With no txnNumber present, no extra documents need to be refetched.
auto entryWithoutTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t2"
- << "o"
- << BSON("_id" << 2 << "a" << 2));
+ << "o" << BSON("_id" << 2 << "a" << 2));
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, entryWithoutTxnNumber, false));
@@ -2156,18 +1994,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2197,20 +2027,11 @@ TEST_F(RSRollbackTest, LocalEntryWithPartialTxnAddsTransactionTableDocToBeRefetc
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2233,15 +2054,8 @@ TEST_F(RSRollbackTest, LocalAbortTxnRefetchesTransactionTableEntry) {
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL));
UUID transactionTableUUID = UUID::gen();
@@ -2269,15 +2083,8 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto entryWithTxnNumber =
@@ -2288,20 +2095,11 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2328,21 +2126,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "count"
- << 2)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "count" << 2)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(2));
@@ -2354,21 +2142,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto partialTxnOperation = std::make_pair(partialTxnEntry, RecordId(1));
@@ -2421,21 +2199,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "txnNumber" << 1LL << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2447,21 +2215,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2473,21 +2231,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto operationBeforeCommonPoint = std::make_pair(entryBeforeCommonPoint, RecordId(9));
@@ -2565,19 +2313,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2589,21 +2329,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2646,20 +2376,13 @@ TEST_F(RSRollbackTest, RollbackFailsIfTransactionDocumentRefetchReturnsDifferent
// transaction number and session id.
FixUpInfo fui;
- auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
- << "i"
- << "ui"
- << UUID::gen()
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << makeLogicalSessionIdForTest().toBSON());
+ auto entryWithTxnNumber =
+ BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
+ << "i"
+ << "ui" << UUID::gen() << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL << "stmtId" << 1
+ << "lsid" << makeLogicalSessionIdForTest().toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
diff --git a/src/mongo/db/repl/split_horizon_test.cpp b/src/mongo/db/repl/split_horizon_test.cpp
index 0a3a655ccaf..95b2df2ad36 100644
--- a/src/mongo/db/repl/split_horizon_test.cpp
+++ b/src/mongo/db/repl/split_horizon_test.cpp
@@ -300,8 +300,7 @@ TEST(SplitHorizonTesting, BSONConstruction) {
// Two horizons with duplicate host and ports.
{BSON("horizonWithDuplicateHost1" << matchingHostAndPort << "horizonWithDuplicateHost2"
- << matchingHostAndPort
- << "uniqueHorizon"
+ << matchingHostAndPort << "uniqueHorizon"
<< nonmatchingHost),
defaultHostAndPort,
{},
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 03d8bc66e55..7145bb15560 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -503,20 +503,16 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx,
if (fromNS.db() != toNS.db()) {
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "Cannot rename collection between databases. From NS: "
- << fromNS.ns()
- << "; to NS: "
- << toNS.ns());
+ << fromNS.ns() << "; to NS: " << toNS.ns());
}
return writeConflictRetry(opCtx, "StorageInterfaceImpl::renameCollection", fromNS.ns(), [&] {
AutoGetDb autoDB(opCtx, fromNS.db(), MODE_X);
if (!autoDB.getDb()) {
return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "Cannot rename collection from " << fromNS.ns() << " to "
- << toNS.ns()
- << ". Database "
- << fromNS.db()
- << " not found.");
+ str::stream()
+ << "Cannot rename collection from " << fromNS.ns() << " to "
+ << toNS.ns() << ". Database " << fromNS.db() << " not found.");
}
WriteUnitOfWork wunit(opCtx);
const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp);
@@ -559,8 +555,7 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx,
if (!idx) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "Could not find index " << indexName << " in "
- << nss.ns()
- << " to set to multikey.");
+ << nss.ns() << " to set to multikey.");
}
collection->getIndexCatalog()->setMultikeyPaths(opCtx, idx, paths);
wunit.commit();
@@ -648,16 +643,13 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
if (!indexDescriptor) {
return Result(ErrorCodes::IndexNotFound,
str::stream() << "Index not found, ns:" << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << ", index: " << *indexName);
}
if (indexDescriptor->isPartial()) {
return Result(ErrorCodes::IndexOptionsConflict,
str::stream()
<< "Partial index is not allowed for this operation, ns:"
- << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << nsOrUUID.toString() << ", index: " << *indexName);
}
KeyPattern keyPattern(indexDescriptor->keyPattern());
@@ -857,11 +849,11 @@ Status _updateWithQuery(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to update documents in " << nss.ns() << " using query "
- << request.getQuery());
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to update documents in " << nss.ns()
+ << " using query " << request.getQuery());
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
@@ -990,11 +982,11 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to delete documents in " << nss.ns() << " using filter "
- << filter);
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to delete documents in " << nss.ns()
+ << " using filter " << filter);
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 912599d9e9c..aa021037cc8 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -68,11 +68,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
BSONObj makeIdIndexSpec(const NamespaceString& nss) {
return BSON("ns" << nss.toString() << "name"
<< "_id_"
- << "key"
- << BSON("_id" << 1)
- << "unique"
- << true
- << "v"
+ << "key" << BSON("_id" << 1) << "unique" << true << "v"
<< static_cast<int>(kIndexVersion));
}
@@ -297,8 +293,7 @@ void _assertRollbackIDDocument(OperationContext* opCtx, int id) {
opCtx,
NamespaceString(StorageInterfaceImpl::kDefaultRollbackIdNamespace),
{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << id)});
+ << StorageInterfaceImpl::kRollbackIdFieldName << id)});
}
TEST_F(StorageInterfaceImplTest, RollbackIdInitializesIncrementsAndReadsProperly) {
@@ -378,8 +373,7 @@ TEST_F(StorageInterfaceImplTest, GetRollbackIDReturnsBadStatusIfRollbackIDIsNotI
std::vector<TimestampedBSONObj> badDoc = {
TimestampedBSONObj{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << "bad id"),
+ << StorageInterfaceImpl::kRollbackIdFieldName << "bad id"),
Timestamp::min()}};
ASSERT_OK(storage.insertDocuments(opCtx, nss, transformInserts(badDoc)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, storage.getRollbackID(opCtx).getStatus());
@@ -623,8 +617,7 @@ TEST_F(StorageInterfaceImplTest, DestroyingUncommittedCollectionBulkLoaderDropsI
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' by letting it go out of scope.
};
@@ -648,8 +641,7 @@ TEST_F(StorageInterfaceImplTest,
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' in a new thread that does not have a Client.
stdx::thread([&loader]() { loader.reset(); }).join();
@@ -912,9 +904,7 @@ TEST_F(StorageInterfaceImplTest, FindDocumentsReturnsIndexOptionsConflictIfIndex
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns()
- << "partialFilterExpression"
+ << "ns" << nss.ns() << "partialFilterExpression"
<< BSON("y" << 1))};
auto loader = unittest::assertGet(storage.createCollectionForBulkLoading(
nss, generateOptionsWithUuid(), makeIdIndexSpec(nss), indexes));
@@ -973,8 +963,8 @@ void _assertDocumentsEqual(const StatusWith<std::vector<BSONObj>>& statusWithDoc
const std::vector<BSONObj>& expectedDocs) {
const auto actualDocs = unittest::assertGet(statusWithDocs);
auto iter = actualDocs.cbegin();
- std::string msg = str::stream() << "expected: " << _toString(expectedDocs)
- << "; actual: " << _toString(actualDocs);
+ std::string msg = str::stream()
+ << "expected: " << _toString(expectedDocs) << "; actual: " << _toString(actualDocs);
for (const auto& doc : expectedDocs) {
ASSERT_TRUE(iter != actualDocs.cend()) << msg;
ASSERT_BSONOBJ_EQ(doc, *(iter++));
@@ -2262,9 +2252,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatab
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
ASSERT_EQUALS(std::string(str::stream()
<< "Database [nosuchdb] not found. Unable to delete documents in "
- << nss.ns()
- << " using filter "
- << filter),
+ << nss.ns() << " using filter " << filter),
status.reason());
}
@@ -2360,9 +2348,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle
ASSERT_EQUALS(std::string(
str::stream()
<< "Collection [mydb.wrongColl] not found. Unable to delete documents in "
- << wrongColl.ns()
- << " using filter "
- << filter),
+ << wrongColl.ns() << " using filter " << filter),
status.reason());
}
@@ -2482,8 +2468,7 @@ TEST_F(StorageInterfaceImplTest,
CollectionOptions options = generateOptionsWithUuid();
options.collation = BSON("locale"
<< "en_US"
- << "strength"
- << 2);
+ << "strength" << 2);
ASSERT_OK(storage.createCollection(opCtx, nss, options));
auto doc1 = BSON("_id" << 1 << "x"
@@ -2658,9 +2643,8 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeySucceeds) {
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
auto indexName = "a_b_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_EQUALS(_createIndexOnEmptyCollection(opCtx, nss, indexSpec), 2);
MultikeyPaths paths = {{1}};
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index f4ac2aa763c..ec32c6dc059 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -345,8 +345,8 @@ public:
[](const NamespaceString& nss,
const CollectionOptions& options,
const BSONObj idIndexSpec,
- const std::vector<BSONObj>&
- secondaryIndexSpecs) -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
+ const std::vector<BSONObj>& secondaryIndexSpecs)
+ -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
return Status{ErrorCodes::IllegalOperation, "CreateCollectionForBulkFn not implemented."};
};
InsertDocumentFn insertDocumentFn = [](OperationContext* opCtx,
@@ -397,8 +397,9 @@ public:
IsAdminDbValidFn isAdminDbValidFn = [](OperationContext*) {
return Status{ErrorCodes::IllegalOperation, "IsAdminDbValidFn not implemented."};
};
- GetCollectionUUIDFn getCollectionUUIDFn = [](
- OperationContext* opCtx, const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
+ GetCollectionUUIDFn getCollectionUUIDFn =
+ [](OperationContext* opCtx,
+ const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
return Status{ErrorCodes::IllegalOperation, "GetCollectionUUIDFn not implemented."};
};
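Both hunks in this mock reflow lambdas whose trailing return type is load-bearing: the body returns an error object in one branch and a value in another, so return-type deduction alone would fail. A simplified analogue using std::optional (names hypothetical):

#include <optional>
#include <string>

int main() {
    // Without "-> std::optional<int>", the two return statements deduce
    // different types (std::nullopt_t vs int) and the lambda fails to compile.
    auto getCollectionUUIDFn = [](const std::string& ns) -> std::optional<int> {
        if (ns.empty())
            return std::nullopt;  // error-ish branch
        return 42;                // value branch (stand-in for a UUID)
    };
    return getCollectionUUIDFn("test.t").value_or(0) == 42 ? 0 : 1;
}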
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 865654622e9..45364e05bf7 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -75,8 +75,7 @@ SyncSourceResolver::SyncSourceResolver(executor::TaskExecutor* taskExecutor,
str::stream() << "required optime (if provided) must be more recent than last "
"fetched optime. requiredOpTime: "
<< requiredOpTime.toString()
- << ", lastOpTimeFetched: "
- << lastOpTimeFetched.toString(),
+ << ", lastOpTimeFetched: " << lastOpTimeFetched.toString(),
requiredOpTime.isNull() || requiredOpTime > lastOpTimeFetched);
uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
}
@@ -172,9 +171,8 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
kLocalOplogNss.db().toString(),
BSON("find" << kLocalOplogNss.coll() << "limit" << 1 << "sort" << BSON("$natural" << 1)
<< "projection"
- << BSON(OplogEntryBase::kTimestampFieldName << 1
- << OplogEntryBase::kTermFieldName
- << 1)),
+ << BSON(OplogEntryBase::kTimestampFieldName
+ << 1 << OplogEntryBase::kTermFieldName << 1)),
[=](const StatusWith<Fetcher::QueryResponse>& response,
Fetcher::NextAction*,
BSONObjBuilder*) {
@@ -414,12 +412,11 @@ Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
const auto opTime = oplogEntry.getOpTime();
if (_requiredOpTime != opTime) {
return Status(ErrorCodes::BadValue,
- str::stream() << "remote oplog contain entry with matching timestamp "
- << opTime.getTimestamp().toString()
- << " but optime "
- << opTime.toString()
- << " does not "
- "match our required optime");
+ str::stream()
+                          << "remote oplog contains an entry with matching timestamp "
+ << opTime.getTimestamp().toString() << " but optime " << opTime.toString()
+ << " does not "
+ "match our required optime");
}
if (_requiredOpTime.getTerm() != opTime.getTerm()) {
return Status(ErrorCodes::BadValue,
@@ -440,8 +437,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
str::stream() << "sync source resolver shut down while looking for "
"required optime "
<< _requiredOpTime.toString()
- << " in candidate's oplog: "
- << candidate))
+ << " in candidate's oplog: " << candidate))
.transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/sync_source_selector.h b/src/mongo/db/repl/sync_source_selector.h
index 0a620d691a2..c21a5e82a14 100644
--- a/src/mongo/db/repl/sync_source_selector.h
+++ b/src/mongo/db/repl/sync_source_selector.h
@@ -41,7 +41,7 @@ class Timestamp;
namespace rpc {
class ReplSetMetadata;
class OplogQueryMetadata;
-}
+} // namespace rpc
namespace repl {
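The rpc hunk above reflects a patch-wide convention: clang-format's FixNamespaceComments option appends a matching comment to every namespace's closing brace. In miniature:

namespace rpc {
class ReplSetMetadata;
class OplogQueryMetadata;
}  // namespace rpc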
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 4781dc71500..ef05c77289a 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -779,8 +779,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
str::stream() << "Attempted to apply an oplog entry ("
<< firstOpTimeInBatch.toString()
<< ") which is not greater than our last applied OpTime ("
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << ")."));
+ << lastAppliedOpTimeAtStartOfBatch.toString() << ")."));
}
// Don't allow the fsync+lock thread to see intermediate states of batch application.
@@ -810,8 +809,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
const auto lastAppliedOpTimeAtEndOfBatch = replCoord->getMyLastAppliedOpTime();
invariant(lastAppliedOpTimeAtStartOfBatch == lastAppliedOpTimeAtEndOfBatch,
str::stream() << "the last known applied OpTime has changed from "
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << " to "
+ << lastAppliedOpTimeAtStartOfBatch.toString() << " to "
<< lastAppliedOpTimeAtEndOfBatch.toString()
<< " in the middle of batch application");
@@ -1282,23 +1280,23 @@ void SyncTail::_applyOps(std::vector<MultiApplier::OperationPtrs>& writerVectors
if (writerVectors[i].empty())
continue;
- _writerPool->schedule([
- this,
- &writer = writerVectors.at(i),
- &status = statusVector->at(i),
- &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)
- ](auto scheduleStatus) {
- invariant(scheduleStatus);
+ _writerPool->schedule(
+ [this,
+ &writer = writerVectors.at(i),
+ &status = statusVector->at(i),
+ &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)](auto scheduleStatus) {
+ invariant(scheduleStatus);
- auto opCtx = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- // This code path is only executed on secondaries and initial syncing nodes, so it is
- // safe to exclude any writes from Flow Control.
- opCtx->setShouldParticipateInFlowControl(false);
+ // This code path is only executed on secondaries and initial syncing nodes, so it
+ // is safe to exclude any writes from Flow Control.
+ opCtx->setShouldParticipateInFlowControl(false);
- status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown(
- [&] { return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo); });
- });
+ status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown([&] {
+ return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo);
+ });
+ });
}
}
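
The reflowed schedule call keeps the same semantics: lambda init-captures bind each task to its own writer, status, and multikey-path slot. A stripped-down sketch of the pattern, with a stand-in pool type rather than the real ThreadPool interface:

    #include <functional>
    #include <vector>

    struct Pool {  // stand-in: the real pool runs tasks on worker threads
        void schedule(std::function<void()> task) { task(); }
    };

    void applyAll(Pool& pool, std::vector<int>& writers, std::vector<int>& statuses) {
        for (size_t i = 0; i < writers.size(); ++i) {
            // Init-captures alias the i-th slots, so each task writes its
            // result without contending on a shared element.
            pool.schedule([&writer = writers[i], &status = statuses[i]] {
                status = writer;  // placeholder for _applyFunc(...)
            });
        }
    }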
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 90f18f7eb49..933f23f4667 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -355,14 +355,8 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
NamespaceString nss("test.t");
auto op = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())
- << "ts"
- << Timestamp(1, 1)
- << "ui"
- << UUID::gen());
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()) << "ts"
+ << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
Collection*,
@@ -387,13 +381,10 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
const BSONObj op = BSON("op"
<< "c"
- << "ns"
- << 12345
- << "o"
+ << "ns" << 12345 << "o"
<< BSON("create"
<< "t")
- << "ts"
- << Timestamp(1, 1));
+ << "ts" << Timestamp(1, 1));
// This test relies on the namespace type check of IDL.
ASSERT_THROWS(
SyncTail::syncApply(_opCtx.get(), op, OplogApplication::Mode::kInitialSync, boost::none),
@@ -503,14 +494,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -520,14 +506,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(1),
@@ -537,11 +518,7 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))),
_lsid,
_txnNum,
@@ -696,14 +673,10 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionTwoBatches) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << (i == 1 ? _nss2.ns() : _nss1.ns())
- << "ui"
- << (i == 1 ? *_uuid2 : *_uuid1)
- << "o"
+ << "ns" << (i == 1 ? _nss2.ns() : _nss1.ns()) << "ui"
+ << (i == 1 ? *_uuid2 : *_uuid1) << "o"
<< insertDocs.back()))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(i),
@@ -774,14 +747,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
StmtId(0),
@@ -791,14 +759,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
@@ -809,14 +772,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 3)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(0),
@@ -826,14 +784,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 4)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(1),
@@ -897,14 +850,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(2),
@@ -914,14 +862,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 0)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -2240,28 +2183,18 @@ TEST_F(IdempotencyTest, CreateCollectionWithCollation) {
auto insertOp2 = insert(fromjson("{ _id: 'Foo', x: 1 }"));
auto updateOp = update("foo", BSON("$set" << BSON("x" << 2)));
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
- auto options = BSON("collation" << BSON("locale"
- << "en"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "57.1")
- << "uuid"
- << uuid);
+ auto options = BSON("collation"
+ << BSON("locale"
+ << "en"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "57.1")
+ << "uuid" << uuid);
auto createColl = makeCreateCollectionOplogEntry(nextOpTime(), nss, options);
// We don't drop and re-create the collection since we don't have ways
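
The collation document in this hunk is easier to audit in relaxed JSON. An equivalent construction with the fromjson test helper (header path assumed; the test then appends the collection UUID as a sibling "uuid" field):

    #include "mongo/bson/json.h"  // mongo::fromjson

    auto collationOptions = mongo::fromjson(R"({
        collation: {
            locale: "en", caseLevel: false, caseFirst: "off", strength: 1,
            numericOrdering: false, alternate: "non-ignorable",
            maxVariable: "punct", normalization: false, backwards: false,
            version: "57.1"
        }
    })");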
@@ -2285,12 +2218,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithIdIndex) {
auto options1 = BSON("idIndex" << BSON("key" << fromjson("{_id: 1}") << "name"
<< "_id_"
- << "v"
- << 2
- << "ns"
- << nss.ns())
- << "uuid"
- << uuid);
+ << "v" << 2 << "ns" << nss.ns())
+ << "uuid" << uuid);
auto createColl1 = makeCreateCollectionOplogEntry(nextOpTime(), nss, options1);
ASSERT_OK(runOpInitialSync(createColl1));
@@ -2324,9 +2253,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithView) {
ASSERT_OK(
runOpInitialSync(makeCreateCollectionOplogEntry(nextOpTime(), viewNss, options.toBSON())));
- auto viewDoc =
- BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll() << "pipeline"
- << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
+ auto viewDoc = BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll()
+ << "pipeline" << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
auto insertViewOp = makeInsertDocumentOplogEntry(nextOpTime(), viewNss, viewDoc);
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
@@ -2764,14 +2692,9 @@ TEST_F(SyncTailTxnTableTest, RetryableWriteThenMultiStatementTxnWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
@@ -2823,14 +2746,9 @@ TEST_F(SyncTailTxnTableTest, MultiStatementTxnWriteThenRetryableWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
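
Every hunk in this test file is a pure reflow of the BSON stream macro: the packed and one-field-per-line layouts compile to the same operator<< chain and yield the same document. A self-contained check (builder header path assumed):

    #include <cassert>
    #include "mongo/bson/bsonobjbuilder.h"  // BSON macro

    int main() {
        mongo::BSONObj packed = BSON("op"
                                     << "i"
                                     << "ns" << "test.t" << "ts" << 1);
        mongo::BSONObj spread = BSON("op"
                                     << "i"
                                     << "ns"
                                     << "test.t"
                                     << "ts"
                                     << 1);
        assert(packed.woCompare(spread) == 0);  // formatting never changes the document
        return 0;
    }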
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index a79cdfa7faa..4c53b558aa1 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -182,7 +182,6 @@ void TaskRunner::_runTasks() {
"this task has been canceled by a previously invoked task"));
}
tasks.clear();
-
};
cancelTasks();
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 5aeb5f11741..1aa98b65c8a 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -249,8 +249,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
_syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
_forceSyncSourceIndex = -1;
log() << "choosing sync source candidate by request: " << _syncSource;
- std::string msg(str::stream() << "syncing from: " << _syncSource.toString()
- << " by request");
+ std::string msg(str::stream()
+ << "syncing from: " << _syncSource.toString() << " by request");
setMyHeartbeatMessage(now, msg);
return _syncSource;
}
@@ -572,8 +572,7 @@ Status TopologyCoordinator::prepareHeartbeatResponseV1(Date_t now,
<< "; remote node's: " << rshb;
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb
- << " reported by remote node");
+ << rshb << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -782,8 +781,9 @@ HeartbeatResponseAction TopologyCoordinator::processHeartbeatResponse(
}
const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
- LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: "
+ LOG(1) << "Could not find " << target
+ << " in current config so ignoring --"
+ " current config: "
<< _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
@@ -1131,8 +1131,9 @@ HeartbeatResponseAction TopologyCoordinator::_updatePrimaryFromHBDataV1(
bool scheduleCatchupTakeover = false;
bool schedulePriorityTakeover = false;
- if (!catchupTakeoverDisabled && (_memberData.at(primaryIndex).getLastAppliedOpTime() <
- _memberData.at(_selfIndex).getLastAppliedOpTime())) {
+ if (!catchupTakeoverDisabled &&
+ (_memberData.at(primaryIndex).getLastAppliedOpTime() <
+ _memberData.at(_selfIndex).getLastAppliedOpTime())) {
LOG_FOR_ELECTION(2) << "I can take over the primary due to fresher data."
<< " Current primary index: " << primaryIndex << " in term "
<< _memberData.at(primaryIndex).getTerm() << "."
@@ -2712,38 +2713,30 @@ void TopologyCoordinator::processReplSetRequestVotes(const ReplSetRequestVotesAr
if (args.getTerm() < _term) {
response->setVoteGranted(false);
response->setReason(str::stream() << "candidate's term (" << args.getTerm()
- << ") is lower than mine ("
- << _term
- << ")");
+ << ") is lower than mine (" << _term << ")");
} else if (args.getConfigVersion() != _rsConfig.getConfigVersion()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's config version ("
- << args.getConfigVersion()
- << ") differs from mine ("
- << _rsConfig.getConfigVersion()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's config version (" << args.getConfigVersion()
+ << ") differs from mine (" << _rsConfig.getConfigVersion() << ")");
} else if (args.getSetName() != _rsConfig.getReplSetName()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's set name (" << args.getSetName()
- << ") differs from mine ("
- << _rsConfig.getReplSetName()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's set name (" << args.getSetName()
+ << ") differs from mine (" << _rsConfig.getReplSetName() << ")");
} else if (args.getLastDurableOpTime() < getMyLastAppliedOpTime()) {
response->setVoteGranted(false);
response
->setReason(str::stream()
<< "candidate's data is staler than mine. candidate's last applied OpTime: "
<< args.getLastDurableOpTime().toString()
- << ", my last applied OpTime: "
- << getMyLastAppliedOpTime().toString());
+ << ", my last applied OpTime: " << getMyLastAppliedOpTime().toString());
} else if (!args.isADryRun() && _lastVote.getTerm() == args.getTerm()) {
response->setVoteGranted(false);
response->setReason(str::stream()
<< "already voted for another candidate ("
<< _rsConfig.getMemberAt(_lastVote.getCandidateIndex()).getHostAndPort()
- << ") this term ("
- << _lastVote.getTerm()
- << ")");
+ << ") this term (" << _lastVote.getTerm() << ")");
} else {
int betterPrimary = _findHealthyPrimaryOfEqualOrGreaterPriority(args.getCandidateIndex());
if (_selfConfig().isArbiter() && betterPrimary >= 0) {
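
All of the vote-response hunks repack str::stream() chains; the builder behaves like an ostringstream that converts implicitly to std::string. A rough stand-in (not the real mongo::str::stream implementation):

    #include <sstream>
    #include <string>

    // Rough stand-in: accumulate with operator<<, then convert implicitly
    // to std::string wherever a message string is expected.
    struct stream {
        std::ostringstream ss;
        template <typename T>
        stream& operator<<(const T& v) {
            ss << v;
            return *this;
        }
        operator std::string() const {
            return ss.str();
        }
    };

    std::string voteDeniedReason(long long candidateTerm, long long myTerm) {
        return stream() << "candidate's term (" << candidateTerm
                        << ") is lower than mine (" << myTerm << ")";
    }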
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index d73cbd469f9..d53a581e82b 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -1072,7 +1072,7 @@ public:
/**
* Gets the number of retries left for this heartbeat attempt. Invalid to call if the current
* state is 'UNINITIALIZED'.
- */
+ */
int retriesLeft() const {
return kMaxHeartbeatRetries - _numFailuresSinceLastStart;
}
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 678b3d87148..6da911b3aca 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -53,9 +53,9 @@
#define ASSERT_NO_ACTION(EXPRESSION) \
ASSERT_EQUALS(mongo::repl::HeartbeatResponseAction::NoAction, (EXPRESSION))
-using std::unique_ptr;
-using mongo::rpc::ReplSetMetadata;
using mongo::rpc::OplogQueryMetadata;
+using mongo::rpc::ReplSetMetadata;
+using std::unique_ptr;
namespace mongo {
namespace repl {
@@ -326,9 +326,7 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -399,44 +397,31 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
}
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
- updateConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself")
- << BSON("_id" << 10 << "host"
- << "h1")
- << BSON("_id" << 20 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 30 << "host"
- << "h3"
- << "hidden"
- << true
- << "priority"
- << 0
- << "votes"
- << 0)
- << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 50 << "host"
- << "h5"
- << "slaveDelay"
- << 1
- << "priority"
- << 0)
- << BSON("_id" << 60 << "host"
- << "h6")
- << BSON("_id" << 70 << "host"
- << "hprimary"))),
- 0);
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself")
+ << BSON("_id" << 10 << "host"
+ << "h1")
+ << BSON("_id" << 20 << "host"
+ << "h2"
+ << "buildIndexes" << false << "priority" << 0)
+ << BSON("_id" << 30 << "host"
+ << "h3"
+ << "hidden" << true << "priority" << 0 << "votes" << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 50 << "host"
+ << "h5"
+ << "slaveDelay" << 1 << "priority" << 0)
+ << BSON("_id" << 60 << "host"
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime lastOpTimeWeApplied = OpTime(Timestamp(100, 0), 0);
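
The member options being repacked here are the usual replSetConfig fields. The same config in relaxed JSON (values copied from the hunk; fromjson comes from "mongo/bson/json.h"):

    auto cfg = mongo::fromjson(R"({
        _id: "rs0", version: 1,
        members: [
            {_id: 1,  host: "hself"},
            {_id: 10, host: "h1"},
            {_id: 20, host: "h2", buildIndexes: false, priority: 0},
            {_id: 30, host: "h3", hidden: true, priority: 0, votes: 0},
            {_id: 40, host: "h4", arbiterOnly: true},
            {_id: 50, host: "h5", slaveDelay: 1, priority: 0},
            {_id: 60, host: "h6"},
            {_id: 70, host: "hprimary"}
        ]
    })");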
@@ -573,9 +558,7 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
@@ -625,10 +608,7 @@ TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -752,9 +732,7 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -809,9 +787,7 @@ TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -862,9 +838,7 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -917,10 +891,7 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -975,9 +946,7 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -1050,9 +1019,7 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
@@ -1086,13 +1053,10 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -1108,21 +1072,15 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1150,21 +1108,15 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1187,21 +1139,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1225,21 +1171,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1264,21 +1204,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1303,21 +1237,15 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1346,21 +1274,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1395,21 +1317,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1445,21 +1361,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1494,21 +1404,15 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1562,21 +1466,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1837,12 +1735,10 @@ TEST_F(TopoCoordTest, ReplSetGetStatusWriteMajorityDifferentFromMajorityVoteCoun
<< "test1:1234")
<< BSON("_id" << 2 << "host"
<< "test2:1234"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "test3:1234"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
3,
startupTime + Milliseconds(1));
@@ -1959,13 +1855,10 @@ TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter)
TEST_F(TopoCoordTest, PrepareStepDownAttemptFailsIfNotLeader) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
getTopoCoord().changeMemberState_forTest(MemberState::RS_SECONDARY);
Status expectedStatus(ErrorCodes::NotMaster, "This node is not a primary. ");
@@ -1979,17 +1872,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
}
@@ -2013,8 +1903,8 @@ TEST_F(PrepareHeartbeatResponseV1Test,
prepareHeartbeatResponseV1(args, &response, &result);
stopCapturingLogMessages();
ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
- ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
- << result.reason() << '"';
+ ASSERT(result.reason().find("repl set names do not match"))
+ << "Actual string was \"" << result.reason() << '"';
ASSERT_EQUALS(1,
countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
"node's: rs1"));
@@ -2027,15 +1917,12 @@ TEST_F(PrepareHeartbeatResponseV1Test,
// reconfig self out of set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
-1);
ReplSetHeartbeatArgsV1 args;
args.setSetName("rs0");
@@ -2231,9 +2118,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2251,9 +2136,7 @@ TEST_F(TopoCoordTest, DoNotBecomeCandidateWhenBecomingSecondaryInSingleNodeSetIf
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2281,15 +2164,10 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2303,9 +2181,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2319,15 +2195,10 @@ TEST_F(TopoCoordTest,
ReplSetConfig cfg;
ASSERT_OK(cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0)))));
+ << "priority" << 0)))));
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2341,9 +2212,7 @@ TEST_F(TopoCoordTest,
getTopoCoord().adjustMaintenanceCountBy(1);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2356,13 +2225,10 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
@@ -2381,9 +2247,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2396,9 +2260,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// reconfig to add to set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2416,9 +2278,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2432,9 +2292,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2450,9 +2308,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2469,9 +2325,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2487,11 +2341,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2508,13 +2358,10 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
// now lose primary due to loss of electability
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2529,9 +2376,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2550,9 +2395,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Add hosts
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2567,18 +2410,13 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Change priorities and tags
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 10)
+ << "priority" << 10)
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 5
- << "tags"
+ << "priority" << 5 << "tags"
<< BSON("dc"
<< "NA"
<< "rack"
@@ -2592,9 +2430,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -2608,9 +2444,7 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
// reconfig and stay secondary
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2625,13 +2459,10 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
@@ -2650,9 +2481,7 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2665,13 +2494,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
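
The request-votes hunks in this file all repack the same command document. Spelled out once with this hunk's values (the dry-run tests add a "dryRun" field; Timestamp is from "mongo/bson/timestamp.h"):

    auto args = BSON("replSetRequestVotes" << 1 << "setName"
                     << "rs0"
                     << "term" << 1LL << "candidateIndex" << 0LL
                     << "configVersion" << 1LL << "lastCommittedOp"
                     << BSON("ts" << Timestamp(10, 0) << "term" << 0LL));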
@@ -2684,13 +2508,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response2;
@@ -2705,9 +2524,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2721,14 +2538,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2743,14 +2554,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2765,14 +2570,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args3
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2787,14 +2586,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args4
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2809,9 +2602,7 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2825,14 +2616,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2847,14 +2632,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2869,9 +2648,7 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2885,13 +2662,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2904,9 +2676,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2920,13 +2690,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 0LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2939,9 +2704,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2959,13 +2722,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2979,9 +2737,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2996,13 +2752,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 3LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -3012,8 +2763,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_FALSE(response.getVoteGranted());
}
@@ -3021,9 +2771,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3040,13 +2788,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3060,14 +2803,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3082,9 +2819,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3101,13 +2836,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3121,14 +2851,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 1LL << "configVersion" << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3143,9 +2867,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3162,13 +2884,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3181,14 +2898,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 0LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 0LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3203,9 +2914,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3222,13 +2931,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3242,14 +2946,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3264,9 +2962,7 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3283,13 +2979,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3303,14 +2994,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 3LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3321,8 +3006,7 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_EQUALS(1, response.getTerm());
ASSERT_FALSE(response.getVoteGranted());
@@ -3338,12 +3022,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3365,12 +3044,7 @@ TEST_F(TopoCoordTest, NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndC
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3391,18 +3065,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
}
@@ -3420,23 +3090,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 7
- << "members"
+ << "version" << 7 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))),
+ << "buildIndexes" << false << "priority" << 0))),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -3772,15 +3434,12 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3822,28 +3481,19 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
hb.initialize(BSON("ok" << 1 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 1
- << "state"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime"
+ << Date_t() + Seconds(100) << "v" << 1 << "state"
<< MemberState::RS_PRIMARY),
0,
/*requireWallTime*/ true)
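
For reference, the heartbeat response parsed above with the OpTime fields expanded (assuming OpTime::toBSON() yields {ts: <Timestamp>, t: <term>}; all values from the hunk):

    auto hbObj = BSON("ok" << 1
                      << "durableOpTime" << BSON("ts" << Timestamp(100, 0) << "t" << 0LL)
                      << "durableWallTime" << Date_t() + Seconds(100)
                      << "opTime" << BSON("ts" << Timestamp(100, 0) << "t" << 0LL)
                      << "wallTime" << Date_t() + Seconds(100)
                      << "v" << 1 << "state" << MemberState::RS_PRIMARY);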
@@ -3902,20 +3552,15 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleAPriorityTakeoverWhenElectableAndReceiveHeartbeatFromLowerPriorityPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3936,21 +3581,16 @@ TEST_F(HeartbeatResponseTestV1,
TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 2 << "host"
<< "host2:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3991,18 +3631,14 @@ TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover)
TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4048,18 +3684,14 @@ TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4107,18 +3739,14 @@ TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4163,18 +3791,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4224,19 +3848,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4253,19 +3872,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4283,19 +3897,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4313,19 +3922,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4345,19 +3949,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4380,19 +3979,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4413,19 +4007,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButNotPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4446,19 +4035,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButN
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueAndPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4479,19 +4063,14 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueA
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4512,23 +4091,15 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfSecondaryCaughtUpButNotElectable) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "priority" << 0 << "hidden" << true)
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4558,15 +4129,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
{
BSONObjBuilder statusBuilder;
@@ -4615,15 +4183,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4688,10 +4253,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 5 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 30 << "host"
<< "hself:27017")
@@ -4699,8 +4261,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
<< "hprimary:27017")
<< BSON("_id" << 10 << "host"
<< "h1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4791,13 +4352,10 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
-1); // This node is no longer part of this replica set.
BSONObjBuilder statusBuilder;
@@ -4827,9 +4385,7 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))),
0);
@@ -4844,9 +4400,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4867,15 +4421,12 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4892,17 +4443,14 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4922,9 +4470,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNo
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4945,15 +4491,12 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary)
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondaries) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
@@ -4982,17 +4525,14 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondar
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 5))),
+ << "priority" << 5))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5014,9 +4554,7 @@ TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffEqualPriorityResolveByMemberId) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -5045,23 +4583,17 @@ TEST_F(TopoCoordTest, ArbiterNotIncludedInW3WriteInPSSAReplSet) {
// In a PSSA set, a w:3 write should only be acknowledged if both secondaries can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5090,31 +4622,21 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
// can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 4 << "host"
<< "host4:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5139,59 +4661,52 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
TEST_F(TopoCoordTest, CheckIfCommitQuorumCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
getTopoCoord().updateConfig(configA, -1, Date_t());
std::vector<MemberConfig> memberConfig;
@@ -5362,18 +4877,14 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenElectableAndReceiveHeartbeatFromPrimaryInCatchup) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5396,22 +4907,16 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenBothCatchupAndPriorityTakeoverPossible) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5434,43 +4939,26 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5539,19 +5027,15 @@ TEST_F(HeartbeatResponseTestV1,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5652,19 +5136,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5739,21 +5219,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5778,21 +5252,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -5814,21 +5282,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -5851,21 +5313,15 @@ TEST_F(HeartbeatResponseTestV1,
// in all multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -6022,21 +5478,15 @@ TEST_F(HeartbeatResponseTestV1, ShouldNotChangeSyncSourceWhenFresherMemberDoesNo
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
+ << "buildIndexes" << false << "priority" << 0))
+ << "protocolVersion" << 1),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -6350,18 +5800,14 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
originalConfig
.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)))
.transitional_ignore();
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index 4fc8382bf4a..dd8dedb0ccb 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -59,31 +59,23 @@ class VoteRequesterTest : public mongo::unittest::Test {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -216,31 +208,23 @@ class VoteRequesterDryRunTest : public VoteRequesterTest {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -261,11 +245,7 @@ public:
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index cd50f2c0289..363eba6eb94 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -146,9 +146,7 @@ private:
invariant(!name.empty(),
str::stream()
<< "Bad spec passed into ReplIndexBuildState constructor, missing '"
- << IndexDescriptor::kIndexNameFieldName
- << "' field: "
- << spec);
+ << IndexDescriptor::kIndexNameFieldName << "' field: " << spec);
indexNames.push_back(name);
}
return indexNames;
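The invariant message in this hunk is built with mongo::str::stream(), an ostringstream-like helper that accepts operator<< and converts implicitly to std::string; the reflow only re-wraps the chain. A rough sketch of the idiom, assuming str::stream from "mongo/util/str.h" (illustrative, not part of the diff):

    #include <string>

    #include "mongo/util/str.h"

    std::string missingFieldMessage(const std::string& fieldName) {
        // The implicit conversion to std::string happens at the return.
        return mongo::str::stream()
            << "Bad spec passed into ReplIndexBuildState constructor, missing '"
            << fieldName << "' field";
    }
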
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 55cbfecfa9c..a3854cb9038 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -148,9 +148,7 @@ Status ActiveMigrationsRegistry::ActiveMoveChunkState::constructErrorStatus() co
str::stream() << "Unable to start new migration because this shard is currently "
"donating chunk "
<< ChunkRange(args.getMinKey(), args.getMaxKey()).toString()
- << " for namespace "
- << args.getNss().ns()
- << " to "
+ << " for namespace " << args.getNss().ns() << " to "
<< args.getToShardId()};
}
@@ -158,10 +156,7 @@ Status ActiveMigrationsRegistry::ActiveReceiveChunkState::constructErrorStatus()
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to start new migration because this shard is currently "
"receiving chunk "
- << range.toString()
- << " for namespace "
- << nss.ns()
- << " from "
+ << range.toString() << " for namespace " << nss.ns() << " from "
<< fromShardId};
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index dcc15c2ceb6..fa383581038 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -90,9 +90,7 @@ Status ActiveMovePrimariesRegistry::ActiveMovePrimaryState::constructErrorStatus
str::stream()
<< "Unable to start new movePrimary operation because this shard is currently "
"moving its primary for namespace "
- << requestArgs.get_shardsvrMovePrimary()->ns()
- << " to "
- << requestArgs.getTo()};
+ << requestArgs.get_shardsvrMovePrimary()->ns() << " to " << requestArgs.getTo()};
}
ScopedMovePrimary::ScopedMovePrimary(ActiveMovePrimariesRegistry* registry,
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 8ddd051478e..38b19a6c94f 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -159,4 +159,4 @@ private:
// This is the future, which will be signaled at the end of a movePrimary command.
std::shared_ptr<Notification<Status>> _completionNotification;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/s/active_move_primaries_registry_test.cpp b/src/mongo/db/s/active_move_primaries_registry_test.cpp
index 70d7265d66e..6b6541accf6 100644
--- a/src/mongo/db/s/active_move_primaries_registry_test.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry_test.cpp
@@ -27,9 +27,9 @@
* it in the license file.
*/
-#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/client.h"
+#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/request_types/move_primary_gen.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index eb6c42923a6..6a01fdd90ee 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -134,11 +134,9 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to shard collection "
<< request.get_shardsvrShardCollection().get().ns()
- << " with arguments: "
- << request.toBSON()
+ << " with arguments: " << request.toBSON()
<< " because this shard is currently running shard collection on this "
- << "collection with arguments: "
- << activeRequest.toBSON()};
+ << "collection with arguments: " << activeRequest.toBSON()};
}
ScopedShardCollection::ScopedShardCollection(std::string nss,
diff --git a/src/mongo/db/s/add_shard_util.cpp b/src/mongo/db/s/add_shard_util.cpp
index 466d1a3fe6d..0dae94c0102 100644
--- a/src/mongo/db/s/add_shard_util.cpp
+++ b/src/mongo/db/s/add_shard_util.cpp
@@ -77,5 +77,5 @@ BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd) {
return request.toBSON();
}
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/add_shard_util.h b/src/mongo/db/s/add_shard_util.h
index b7ab9fd0b36..020831833ba 100644
--- a/src/mongo/db/s/add_shard_util.h
+++ b/src/mongo/db/s/add_shard_util.h
@@ -60,5 +60,5 @@ AddShard createAddShardCmd(OperationContext* opCtx, const ShardId& shardName);
*/
BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd);
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
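The add_shard_util hunks above swap two namespace-closing comments that were in the wrong order (the file really closes add_shard_util first, then mongo), and the registry header a few hunks earlier gains a trailer it was missing. clang-format maintains these trailers automatically when its FixNamespaceComments option is enabled, which this sketch assumes:

    namespace mongo {
    namespace add_shard_util {
    // declarations ...
    // The innermost namespace is closed and labeled first:
    }  // namespace add_shard_util
    }  // namespace mongo
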
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index e6d358e982b..a8f7cde67f4 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -441,12 +441,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
@@ -462,12 +460,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 7b8a7a021c1..8f9cbc9b8ef 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -127,8 +127,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << intersectingRange.toString()};
+ << " is overlapping with existing: " << intersectingRange.toString()};
}
// Check for containment
@@ -138,8 +137,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
invariant(SimpleBSONObjComparator::kInstance.evaluate(range.max < nextRange.max));
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << nextRange.toString()};
+ << " is overlapping with existing: " << nextRange.toString()};
}
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b131bbafde7..0a988cf1b13 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -517,7 +517,7 @@ void MigrationManager::_schedule(WithLock lock,
StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
executor->scheduleRemoteCommand(
remoteRequest,
- [ this, service = opCtx->getServiceContext(), itMigration ](
+ [this, service = opCtx->getServiceContext(), itMigration](
const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
@@ -614,8 +614,7 @@ Status MigrationManager::_processRemoteCommandResponse(
scopedMigrationRequest->keepDocumentOnDestruct();
return {ErrorCodes::BalancerInterrupted,
stream() << "Migration interrupted because the balancer is stopping."
- << " Command status: "
- << remoteCommandResponse.status.toString()};
+ << " Command status: " << remoteCommandResponse.status.toString()};
}
if (!remoteCommandResponse.isOK()) {
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 37749bba329..ff801ee67e6 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -138,17 +138,17 @@ protected:
// Random static initialization order can result in X constructor running before Y constructor
// if X and Y are defined in different source files. Defining variables here to enforce order.
const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId0.toString())
+ << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId1.toString())
+ << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId2.toString())
+ << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId3.toString())
+ << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
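The kShard constants above are defined together because, as the comment in the hunk notes, initialization order of namespace-scope objects is only guaranteed within one translation unit. A tiny sketch of the rule being relied on, with hypothetical names (not from the diff):

    #include <string>

    namespace {
    // Within a single translation unit, these run top to bottom, so kConn
    // may safely depend on kHost; across .cpp files the order would be
    // unspecified and kConn could read an unconstructed kHost.
    const std::string kHost = "shard0:27017";
    const std::string kConn = kHost + "/rs0";
    }  // namespace
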
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index a0ef6dadf16..40441637ba4 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -118,8 +118,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithMigrationQueryResult.isOK()) {
return statusWithMigrationQueryResult.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to query config.migrations.");
}
if (statusWithMigrationQueryResult.getValue().docs.empty()) {
@@ -134,11 +133,9 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithActiveMigration.isOK()) {
return statusWithActiveMigration.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to parse active migration document '"
- << redact(activeMigrationBSON.toString())
- << "'.");
+ << redact(activeMigrationBSON.toString()) << "'.");
}
MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
@@ -172,8 +169,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
str::stream() << "Failed to insert the config.migrations document after max "
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
- << "' in collection '"
- << migrateInfo.nss.ns()
+ << "' in collection '" << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 88cd5d4b0eb..b921f530460 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -165,8 +165,8 @@ public:
BSONObjIterator i(currKey);
for (int k = 0; k < keyPatternLength; k++) {
if (!i.more()) {
- errmsg = str::stream() << "index key " << currKey << " too short for pattern "
- << keyPattern;
+ errmsg = str::stream()
+ << "index key " << currKey << " too short for pattern " << keyPattern;
return false;
}
BSONElement currKeyElt = i.next();
@@ -192,8 +192,9 @@ public:
const string msg = str::stream()
<< "There are documents which have missing or incomplete shard key fields ("
- << redact(currKey) << "). Please ensure that all documents in the collection "
- "include all fields from the shard key.";
+ << redact(currKey)
+ << "). Please ensure that all documents in the collection "
+ "include all fields from the shard key.";
log() << "checkShardingIndex for '" << nss.toString() << "' failed: " << msg;
errmsg = msg;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index cfe972510a7..049ab0ae261 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -176,8 +176,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
uassert(40618,
str::stream() << "failed to initialize cursor during auto split due to "
- << "connection problem with "
- << client.getServerAddress(),
+ << "connection problem with " << client.getServerAddress(),
cursor.get() != nullptr);
if (cursor->more()) {
@@ -273,8 +272,8 @@ void ChunkSplitter::trySplitting(std::shared_ptr<ChunkSplitStateDriver> chunkSpl
return;
}
_threadPool.schedule(
- [ this, csd = std::move(chunkSplitStateDriver), nss, min, max, dataWritten ](
- auto status) noexcept {
+ [ this, csd = std::move(chunkSplitStateDriver), nss, min, max,
+ dataWritten ](auto status) noexcept {
invariant(status);
_runAutosplit(csd, nss, min, max, dataWritten);
@@ -384,7 +383,8 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
log() << "autosplitted " << nss << " chunk: " << redact(chunk.toString()) << " into "
<< (splitPoints.size() + 1) << " parts (maxChunkSizeBytes " << maxChunkSizeBytes
<< ")"
- << (topChunkMinKey.isEmpty() ? "" : " (top chunk migration suggested" +
+ << (topChunkMinKey.isEmpty() ? ""
+ : " (top chunk migration suggested" +
(std::string)(shouldBalance ? ")" : ", but no migrations allowed)"));
// Because the ShardServerOpObserver uses the metadata from the CSS for tracking incoming
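The chunk_splitter hunk re-wraps a move-capture lambda handed to a thread pool; the older output keeps spaces inside the capture list of this noexcept lambda, hence the "[ this, ... ]" form on both sides. A self-contained sketch of the move-capture idiom itself, using a toy pool rather than MongoDB's ThreadPool API (illustrative only):

    #include <memory>
    #include <utility>

    struct TinyPool {
        // A real pool would enqueue and run on a worker; this toy runs inline.
        template <typename Task>
        void schedule(Task&& task) {
            std::forward<Task>(task)();
        }
    };

    void enqueue(TinyPool& pool, std::unique_ptr<int> driver) {
        // The init-capture moves ownership into the task; 'mutable' lets the
        // body modify its captured copy. Such a lambda is move-only, so the
        // pool must accept a generic callable rather than std::function.
        pool.schedule([d = std::move(driver)]() mutable { *d += 1; });
    }
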
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 28eab0d23bb..303c8a7a602 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -89,9 +89,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
BSONObj keyPattern = metadata->getKeyPattern();
if (!startingFromKey.isEmpty()) {
if (!metadata->isValidKey(startingFromKey)) {
- *errMsg = str::stream() << "could not cleanup orphaned data, start key "
- << startingFromKey << " does not match shard key pattern "
- << keyPattern;
+ *errMsg = str::stream()
+ << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
log() << *errMsg;
return CleanupResult_Error;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index bb9063b8395..aca45d987ae 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -132,8 +132,7 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
return {ErrorCodes::StaleShardVersion,
str::stream() << "Unable to find chunk with the exact bounds "
<< ChunkRange(chunk.getMin(), chunk.getMax()).toString()
- << " at collection version "
- << getCollVersion().toString()};
+ << " at collection version " << getCollVersion().toString()};
}
return Status::OK();
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index d125e651adc..34ff588020f 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -132,8 +132,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(100, 0)));
+ << "atClusterTime" << Timestamp(100, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -163,8 +162,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(50, 0)));
+ << "atClusterTime" << Timestamp(50, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -202,8 +200,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(10, 0)));
+ << "atClusterTime" << Timestamp(10, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index ac36558a234..035bd4777f8 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -112,8 +112,7 @@ TEST_F(NoChunkFixture, IsValidKey) {
ASSERT(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("a"
<< "abcde"
- << "b"
- << 1)));
+ << "b" << 1)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("c"
<< "abcde")));
}
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 1b63c1ce74c..d5affc26cc0 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -178,14 +178,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
NamespaceString::kServerConfigurationNamespace.ns(),
BSON("_id"
<< "startRangeDeletion"
- << "ns"
- << nss.ns()
- << "epoch"
- << epoch
- << "min"
- << range->getMin()
- << "max"
- << range->getMax()));
+ << "ns" << nss.ns() << "epoch" << epoch << "min"
+ << range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
@@ -354,8 +348,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
auto catalog = collection->getIndexCatalog();
const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (!idx) {
- std::string msg = str::stream() << "Unable to find shard key index for "
- << keyPattern.toString() << " in " << nss.ns();
+ std::string msg = str::stream()
+ << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
@@ -375,8 +369,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
const IndexDescriptor* descriptor =
collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!descriptor) {
- std::string msg = str::stream() << "shard key index with name " << indexName << " on '"
- << nss.ns() << "' was dropped";
+ std::string msg = str::stream()
+ << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 6fae0ee5d18..0ebc79ac8a6 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -59,14 +59,14 @@ class CollectionRangeDeleter {
public:
/**
- * This is an object n that asynchronously changes state when a scheduled range deletion
- * completes or fails. Call n.ready() to discover if the event has already occurred. Call
- * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
- * interrupted, waitStatus throws.
- *
- * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
- * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
- */
+ * This is an object n that asynchronously changes state when a scheduled range deletion
+ * completes or fails. Call n.ready() to discover if the event has already occurred. Call
+ * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
+ * interrupted, waitStatus throws.
+ *
+ * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
+ * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
+ */
class DeleteNotification {
public:
DeleteNotification();
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index fd09b44ace5..a772a28c8d7 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -163,8 +163,7 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
Status result = stillScheduled->waitStatus(opCtx);
if (!result.isOK()) {
return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns()
- << " range "
- << orphanRange.toString());
+ << " range " << orphanRange.toString());
}
}
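The collection_sharding_runtime hunk re-wraps a Status::withContext() call, which prepends context text to a non-OK Status's reason. A rough sketch of the idiom as the surrounding hunks use it, assuming withContext behaves as in the tree at this point (illustrative, not part of the diff):

    #include <string>

    #include "mongo/base/status.h"
    #include "mongo/util/str.h"

    mongo::Status addRangeContext(mongo::Status result, const std::string& range) {
        // withContext() returns a copy with the extra text prepended to the
        // reason; an OK status is passed through unchanged.
        if (!result.isOK()) {
            return result.withContext(
                mongo::str::stream() << "Failed to delete orphaned range " << range);
        }
        return result;
    }
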
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index d085f9440f3..1ee6cfbeed8 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -80,12 +80,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 3
- << "_id"
+ << "key" << 3 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Check that an order for deletion from an unsharded collection extracts just the "_id" field
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -103,12 +100,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 100
- << "_id"
+ << "key" << 100 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Verify the shard key is extracted, in correct order, followed by the "_id" field.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -130,15 +124,13 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
<< "abc"
<< "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
BSON("key" << 100 << "_id"
<< "hello"
- << "key2"
- << true));
+ << "key2" << true));
ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc));
}
@@ -151,8 +143,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
auto doc = BSON("key2" << true << "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place, not hashed.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index b1c3717f3ff..e9ca1356b62 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -50,8 +50,8 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
namespace {
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index eea3b876e46..fe5c843303e 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -160,10 +160,9 @@ public:
if (!toShardStatus.isOK()) {
log() << "Could not move database '" << dbname << "' to shard '" << to
<< causedBy(toShardStatus.getStatus());
- uassertStatusOKWithContext(
- toShardStatus.getStatus(),
- str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "'");
+ uassertStatusOKWithContext(toShardStatus.getStatus(),
+ str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index 21901105103..ff1334ef1ed 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -96,8 +96,7 @@ public:
uassert(ErrorCodes::StaleEpoch,
str::stream()
- << "refineCollectionShardKey namespace "
- << nss.toString()
+ << "refineCollectionShardKey namespace " << nss.toString()
<< " has a different epoch than mongos had in its routing table cache",
request().getEpoch() == collType.getEpoch());
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2f39f852bc8..5186128ef8c 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -112,8 +112,8 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
- std::string msg(str::stream() << "Could not drop shard '" << target
- << "' because it does not exist");
+ std::string msg(str::stream()
+ << "Could not drop shard '" << target << "' because it does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index e53552916d8..216d3bbaa2c 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -99,8 +99,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
uassert(ErrorCodes::BadValue,
str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: "
- << collation,
+ << "but found: " << collation,
!collator);
simpleCollationSpecified = true;
}
@@ -114,8 +113,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
int numChunks = request->getNumInitialChunks();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards
- << ", 8192 * number of shards; or "
+ << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
<< maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
@@ -208,9 +206,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
auto chunkManager = routingInfo.cm();
// Move and commit each "big chunk" to a different shard.
- auto nextShardId = [&, indx = 0 ]() mutable {
- return shardIds[indx++ % shardIds.size()];
- };
+ auto nextShardId = [&, indx = 0]() mutable { return shardIds[indx++ % shardIds.size()]; };
for (auto chunk : chunkManager->chunks()) {
const auto shardId = nextShardId();
@@ -323,10 +319,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected the primary shard host " << primaryShard->getConnString()
- << " for database "
- << nss.db()
- << " to return an entry for "
- << nss.ns()
+ << " for database " << nss.db() << " to return an entry for " << nss.ns()
<< " in its listCollections response, but it did not",
!res.isEmpty());
@@ -338,15 +331,12 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return 'info' field as part of "
"listCollections for "
- << nss.ns()
- << ", but got "
- << res,
+ << nss.ns() << ", but got " << res,
!collectionInfo.isEmpty());
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -576,8 +566,7 @@ public:
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
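The hunk above and the first hunk of the next file both collapse a round-robin "nextShardId" lambda onto one line; the [&, indx = 0]() mutable init-capture keeps a private counter that advances across calls. A runnable sketch with hypothetical data (illustrative only):

    #include <string>
    #include <vector>

    std::string roundRobinDemo() {
        const std::vector<std::string> shardIds{"shard0", "shard1", "shard2"};
        // 'mutable' lets the captured counter advance across calls; '&'
        // gives the lambda access to shardIds without copying it.
        auto nextShardId = [&, indx = 0]() mutable {
            return shardIds[indx++ % shardIds.size()];
        };
        nextShardId();         // returns "shard0"
        return nextShardId();  // returns "shard1"
    }
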
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 71931babb73..9d882e45678 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -223,7 +223,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& keyPattern = shardKeyPattern.getKeyPattern();
- auto nextShardIdForHole = [&, indx = 0 ]() mutable {
+ auto nextShardIdForHole = [&, indx = 0]() mutable {
return shardIdsForGaps[indx++ % shardIdsForGaps.size()];
};
@@ -250,10 +250,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& shardIdsForChunk = it->second;
uassert(50973,
str::stream()
- << "Cannot shard collection "
- << nss.ns()
- << " due to zone "
- << tag.getTag()
+ << "Cannot shard collection " << nss.ns() << " due to zone " << tag.getTag()
<< " which is not assigned to a shard. Please assign this zone to a shard.",
!shardIdsForChunk.empty());
@@ -396,7 +393,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
shardSelectedSplitPoints,
shardIds,
1 // numContiguousChunksPerShard
- );
+ );
}
boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShardedWithSameOptions(
@@ -425,8 +422,7 @@ boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShar
// match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
- << " with options "
- << existingOptions.toString(),
+ << " with options " << existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
return existingOptions;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index fc610ed35a3..424db73a9d0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -198,8 +198,7 @@ Status ShardingCatalogManager::_initConfigVersion(OperationContext* opCtx) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION
- << "; currently at v"
+ << CURRENT_CONFIG_VERSION << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index eee16cc6aa5..e92588cbe07 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -129,8 +129,9 @@ protected:
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, nss.db());
ASSERT_BSONOBJ_EQ(request.cmdObj,
- BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("drop" << nss.coll() << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 1);
@@ -146,8 +147,7 @@ protected:
ASSERT_BSONOBJ_EQ(request.cmdObj,
BSON("setFeatureCompatibilityVersion"
<< "4.2"
- << "writeConcern"
- << writeConcern));
+ << "writeConcern" << writeConcern));
return response;
});
@@ -315,18 +315,16 @@ protected:
* describing the addShard request for 'addedShard'.
*/
void assertChangeWasLogged(const ShardType& addedShard) {
- auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name" << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
@@ -347,35 +345,24 @@ protected:
TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
std::string shardName = "shardName";
- BSONObj expectedBSON = BSON("update"
- << "system.version"
- << "bypassDocumentValidation"
- << false
- << "ordered"
- << true
- << "updates"
- << BSON_ARRAY(BSON(
- "q"
- << BSON("_id"
- << "shardIdentity")
- << "u"
- << BSON("shardName" << shardName << "clusterId" << _clusterId
- << "configsvrConnectionString"
- << replicationCoordinator()
- ->getConfig()
- .getConnectionString()
- .toString())
- << "multi"
- << false
- << "upsert"
- << true))
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 60000)
- << "allowImplicitCollectionCreation"
- << true);
+ BSONObj expectedBSON = BSON(
+ "update"
+ << "system.version"
+ << "bypassDocumentValidation" << false << "ordered" << true << "updates"
+ << BSON_ARRAY(BSON(
+ "q" << BSON("_id"
+ << "shardIdentity")
+ << "u"
+ << BSON(
+ "shardName"
+ << shardName << "clusterId" << _clusterId << "configsvrConnectionString"
+ << replicationCoordinator()->getConfig().getConnectionString().toString())
+ << "multi" << false << "upsert" << true))
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout" << 60000)
+ << "allowImplicitCollectionCreation" << true);
auto addShardCmd = add_shard_util::createAddShardCmd(operationContext(), shardName);
auto actualBSON = add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
ASSERT_BSONOBJ_EQ(expectedBSON, actualBSON);
@@ -427,8 +414,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -508,8 +494,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -648,8 +633,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -706,8 +690,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -735,12 +718,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
"as a shard since it is a config server");
});
- BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
- << "config"
- << "configsvr"
- << true
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ BSONObj commandResponse =
+ BSON("ok" << 1 << "ismaster" << true << "setName"
+ << "config"
+ << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -772,9 +753,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -808,9 +787,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -855,9 +832,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -900,9 +875,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -966,9 +939,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -1049,8 +1020,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 0936c9fbb55..4423f7ba458 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -129,8 +129,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
<< ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -146,8 +145,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
const ShardId& shard) {
BSONObj chunkQuery =
BSON(ChunkType::ns() << nss.ns() << ChunkType::min() << min << ChunkType::max() << max
- << ChunkType::shard()
- << shard);
+ << ChunkType::shard() << shard);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -166,8 +164,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
if (findResponseWith.getValue().docs.empty()) {
return {ErrorCodes::Error(40165),
str::stream()
- << "Could not find the chunk ("
- << chunkQuery.toString()
+ << "Could not find the chunk (" << chunkQuery.toString()
<< ") on the shard. Cannot execute the migration commit with invalid chunks."};
}
@@ -321,13 +318,9 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
if (collVersion.epoch() != requestEpoch) {
return {ErrorCodes::StaleEpoch,
str::stream() << "splitChunk cannot split chunk " << range.toString()
- << ". Collection '"
- << nss.ns()
- << "' was dropped and re-created."
- << " Current epoch: "
- << collVersion.epoch()
- << ", cmd epoch: "
- << requestEpoch};
+ << ". Collection '" << nss.ns() << "' was dropped and re-created."
+ << " Current epoch: " << collVersion.epoch()
+ << ", cmd epoch: " << requestEpoch};
}
// Get the shard version (max chunk version) for the shard requesting the split.
@@ -387,18 +380,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {
ErrorCodes::InvalidOptions,
str::stream() << "Split keys must be specified in strictly increasing order. Key "
- << endKey
- << " was specified after "
- << startKey
- << "."};
+ << endKey << " was specified after " << startKey << "."};
}
// Verify that splitPoints are not repeated
if (endKey.woCompare(startKey) == 0) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Split on lower bound of chunk "
- << ChunkRange(startKey, endKey).toString()
- << "is not allowed"};
+ << ChunkRange(startKey, endKey).toString() << " is not allowed"};
}
// verify that splits don't create too-big shard keys
@@ -468,10 +457,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
- << ChunkType::max()
- << range.getMax())
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << ChunkType::max() << range.getMax())
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
bb.append(ChunkType::epoch(), requestEpoch);
@@ -598,10 +585,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ErrorCodes::InvalidOptions,
str::stream()
<< "Chunk boundaries must be specified in strictly increasing order. Boundary "
- << chunkBoundaries[i]
- << " was specified after "
- << itChunk.getMin()
- << "."};
+ << chunkBoundaries[i] << " was specified after " << itChunk.getMin() << "."};
}
itChunk.setMax(chunkBoundaries[i]);
@@ -714,11 +698,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
+ << "', but the shard's is " << collectionEpoch.toString()
<< "'. Aborting migration commit for chunk ("
- << migratedChunk.getRange().toString()
- << ")."};
+ << migratedChunk.getRange().toString() << ")."};
}
// Check that migratedChunk is where it should be, on fromShard.
@@ -762,8 +744,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.get()) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "The chunk history for chunk with namespace " << nss.ns()
- << " and min key "
- << migratedChunk.getMin()
+ << " and min key " << migratedChunk.getMin()
<< " is corrupted. The last validAfter "
<< newHistory.back().getValidAfter().toString()
<< " is greater or equal to the new validAfter "
@@ -837,9 +818,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(OperationContex
if (origChunks.size() != 1) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to find the chunk for namespace " << nss.ns()
- << " and min key "
- << key.toString()
- << ", but found no chunks"};
+ << " and min key " << key.toString() << ", but found no chunks"};
}
return ChunkType::fromConfigBSON(origChunks.front());
@@ -886,9 +865,7 @@ StatusWith<ChunkVersion> ShardingCatalogManager::_findCollectionVersion(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
- << "'."};
+ << "', but the shard's is " << collectionEpoch.toString() << "'."};
}
return currentCollectionVersion;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 5993661a884..2192eaa4599 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -75,9 +75,9 @@
namespace mongo {
using CollectionUUID = UUID;
+using std::set;
using std::string;
using std::vector;
-using std::set;
namespace {
@@ -113,8 +113,8 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
// TODO: SERVER-33048 check idIndex field
uassert(ErrorCodes::NamespaceExists,
- str::stream() << "ns: " << ns.ns() << " already exists with different options: "
- << actualOptions.toBSON(),
+ str::stream() << "ns: " << ns.ns()
+ << " already exists with different options: " << actualOptions.toBSON(),
options.matchesStorageOptions(
actualOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())));
@@ -170,8 +170,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -432,7 +431,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
optimizationType,
treatAsEmpty,
1 // numContiguousChunksPerShard
- );
+ );
} else {
initialChunks = InitialSplitPolicy::createFirstChunksUnoptimized(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 3a408ea6090..11091ef8957 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -91,10 +91,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: "
- << actualDbName
- << " want to add: "
- << dbName,
+ << " have: " << actualDbName << " want to add: " << dbName,
actualDbName == dbName);
// We did a local read of the database entry above and found that the database already
@@ -264,8 +261,7 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
// are holding the dist lock during the movePrimary operation.
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to update primary shard for database '" << dbname
- << " with version "
- << currentDatabaseVersion.getLastMod(),
+ << " with version " << currentDatabaseVersion.getLastMod(),
updateStatus.getValue());
// Ensure the next attempt to retrieve the database or any of its collections will do a full
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 8cd076b9c28..825236b9575 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -149,15 +149,13 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary"
- << 12
- << "partitioned"
- << false),
- ShardingCatalogClient::kMajorityWriteConcern));
+ ASSERT_OK(
+ catalogClient()->insertConfigDocument(operationContext(),
+ DatabaseType::ConfigNS,
+ BSON("_id"
+ << "db6"
+ << "primary" << 12 << "partitioned" << false),
+ ShardingCatalogClient::kMajorityWriteConcern));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db6"),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 8e6e2e29423..066405d32b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -181,19 +181,17 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
Status commandStatus = getStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
- commandStatus = {ErrorCodes::OperationFailed,
- str::stream() << "failed to run command " << cmdObj
- << " when attempting to add shard "
- << targeter->connectionString().toString()
- << causedBy(commandStatus)};
+ commandStatus = {
+ ErrorCodes::OperationFailed,
+ str::stream() << "failed to run command " << cmdObj << " when attempting to add shard "
+ << targeter->connectionString().toString() << causedBy(commandStatus)};
}
Status writeConcernStatus = getWriteConcernStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(writeConcernStatus.code())) {
writeConcernStatus = {ErrorCodes::OperationFailed,
str::stream() << "failed to satisfy writeConcern for command "
- << cmdObj
- << " when attempting to add shard "
+ << cmdObj << " when attempting to add shard "
<< targeter->connectionString().toString()
<< causedBy(writeConcernStatus)};
}
@@ -257,8 +255,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
} else {
return {ErrorCodes::IllegalOperation,
str::stream() << "A shard already exists containing the replica set '"
- << existingShardConnStr.getSetName()
- << "'"};
+ << existingShardConnStr.getSetName() << "'"};
}
}
@@ -277,10 +274,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
return {ErrorCodes::IllegalOperation,
str::stream() << "'" << addingHost.toString() << "' "
<< "is already a member of the existing shard '"
- << existingShard.getHost()
- << "' ("
- << existingShard.getName()
- << ")."};
+ << existingShard.getHost() << "' ("
+ << existingShard.getName() << ")."};
}
}
}
@@ -340,8 +335,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() >
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
@@ -362,8 +356,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -387,8 +380,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? "
- << resIsMaster};
+ << "is the replica set still initializing? " << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -396,8 +388,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name "
- << foundSetName};
+ << ") does not match the actual set name " << foundSetName};
}
// Is it a config server?
@@ -437,11 +428,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host
- << " does not belong to replica set "
- << foundSetName
- << "; found "
- << resIsMaster.toString()};
+ << host << " does not belong to replica set " << foundSetName
+ << "; found " << resIsMaster.toString()};
}
}
}
@@ -611,13 +599,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'"
- << shardConnectionString.toString()
- << "'"
- << " because a local database '"
- << dbName
- << "' exists in another "
- << dbDoc.getPrimary());
+ << "'" << shardConnectionString.toString() << "'"
+ << " because a local database '" << dbName
+ << "' exists in another " << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index b1b7b0d9adb..9b5b8eb0f8a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -154,17 +154,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
if (!range.getMin().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),
diff --git a/src/mongo/db/s/config_server_op_observer_test.cpp b/src/mongo/db/s/config_server_op_observer_test.cpp
index fc5ff24708d..eca0a3a19b5 100644
--- a/src/mongo/db/s/config_server_op_observer_test.cpp
+++ b/src/mongo/db/s/config_server_op_observer_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/config_server_test_fixture.h"
#include "mongo/unittest/death_test.h"
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0a808e8daac..75ea7635773 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -79,16 +79,13 @@ void mergeChunks(OperationContext* opCtx,
const BSONObj& minKey,
const BSONObj& maxKey,
const OID& epoch) {
- const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
- << minKey << " to " << maxKey;
+ const std::string whyMessage = str::stream()
+ << "merging chunks in " << nss.ns() << " from " << minKey << " to " << maxKey;
auto scopedDistLock = uassertStatusOKWithContext(
Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in ["
- << redact(minKey)
- << ", "
- << redact(maxKey)
+ << " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
auto const shardingState = ShardingState::get(opCtx);
@@ -109,20 +106,14 @@ void mergeChunks(OperationContext* opCtx,
const auto shardVersion = metadata->getShardVersion();
uassert(ErrorCodes::StaleEpoch,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: "
- << epoch.toString()
- << ", current epoch: "
- << shardVersion.epoch()
- << ")",
+ << " has changed since merge was sent (sent epoch: " << epoch.toString()
+ << ", current epoch: " << shardVersion.epoch() << ")",
shardVersion.epoch() == epoch);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, the range "
- << redact(ChunkRange(minKey, maxKey).toString())
- << " is not valid"
- << " for collection "
- << nss.ns()
- << " with key pattern "
+ << redact(ChunkRange(minKey, maxKey).toString()) << " is not valid"
+ << " for collection " << nss.ns() << " with key pattern "
<< metadata->getKeyPattern().toString(),
metadata->isValidKey(minKey) && metadata->isValidKey(maxKey));
@@ -145,11 +136,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " and ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " and ending at "
+ << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
!chunksToMerge.empty());
@@ -164,9 +152,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " does not belong to shard "
<< shardingState->shardId(),
minKeyInRange);
@@ -177,9 +163,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range ending at " << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
maxKeyInRange);
@@ -205,11 +189,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(
ErrorCodes::IllegalOperation,
str::stream()
- << "could not merge chunks, collection "
- << nss.ns()
- << " has a hole in the range "
- << ChunkRange(minKey, maxKey).toString()
- << " at "
+ << "could not merge chunks, collection " << nss.ns() << " has a hole in the range "
+ << ChunkRange(minKey, maxKey).toString() << " at "
<< ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin()).toString(),
chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) == 0);
}
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 7394d7dae15..5f48778deca 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -144,7 +144,7 @@ void scheduleCleanup(executor::TaskExecutor* executor,
Date_t when) {
LOG(1) << "Scheduling cleanup on " << nss.ns() << " at " << when;
auto swCallbackHandle = executor->scheduleWorkAt(
- when, [ executor, nss = std::move(nss), epoch = std::move(epoch) ](auto& args) {
+ when, [executor, nss = std::move(nss), epoch = std::move(epoch)](auto& args) {
auto& status = args.status;
if (ErrorCodes::isCancelationError(status.code())) {
return;
@@ -230,11 +230,11 @@ MetadataManager::~MetadataManager() {
}
void MetadataManager::_clearAllCleanups(WithLock lock) {
- _clearAllCleanups(
- lock,
- {ErrorCodes::InterruptedDueToReplStateChange,
- str::stream() << "Range deletions in " << _nss.ns()
- << " abandoned because collection was dropped or became unsharded"});
+ _clearAllCleanups(lock,
+ {ErrorCodes::InterruptedDueToReplStateChange,
+ str::stream()
+ << "Range deletions in " << _nss.ns()
+ << " abandoned because collection was dropped or became unsharded"});
}
void MetadataManager::_clearAllCleanups(WithLock, Status status) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 391fb17c937..70c936164f0 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -328,8 +328,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return {ErrorCodes::OperationIncomplete,
str::stream() << "Unable to enter critical section because the recipient "
"shard thinks all data is cloned while there are still "
- << cloneLocsRemaining
- << " documents remaining"};
+ << cloneLocsRemaining << " documents remaining"};
}
return Status::OK();
@@ -746,8 +745,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for "
- << _args.getNss().ns()};
+ << " in storeCurrentLocs for " << _args.getNss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
@@ -819,19 +817,10 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull
- << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes()
- << ", average document size is "
- << avgRecSize
- << ". Found "
- << recCount
- << " documents in chunk "
- << " ns: "
- << _args.getNss().ns()
- << " "
- << _args.getMinKey()
- << " -> "
+ << maxRecsWhenFull << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes() << ", average document size is "
+ << avgRecSize << ". Found " << recCount << " documents in chunk "
+ << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 2a751999ca4..f7e325bfafc 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -86,8 +86,8 @@ public:
invariant(_chunkCloner);
} else {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "No active migrations were found for collection "
- << nss->ns());
+ str::stream()
+ << "No active migrations were found for collection " << nss->ns());
}
}
@@ -317,9 +317,7 @@ public:
auto rollbackId = repl::ReplicationProcess::get(opCtx)->getRollbackID();
uassert(50881,
str::stream() << "rollback detected, rollbackId was "
- << rollbackIdAtMigrationInit
- << " but is now "
- << rollbackId,
+ << rollbackIdAtMigrationInit << " but is now " << rollbackId,
rollbackId == rollbackIdAtMigrationInit);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 481b0d2a707..75dd569f264 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -436,8 +436,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "received abort request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -462,8 +461,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
str::stream() << "Migration startCommit attempted when not in STEADY state."
- << " Sender's session is "
- << sessionId.toString()
+ << " Sender's session is " << sessionId.toString()
<< (_sessionId ? (". Current session is " + _sessionId->toString())
: ". No active session on this shard.")};
}
@@ -477,8 +475,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "startCommit received commit request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -550,9 +547,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
auto infos = infosRes.docs;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "expected listCollections against the primary shard for "
- << nss.toString()
- << " to return 1 entry, but got "
- << infos.size()
+ << nss.toString() << " to return 1 entry, but got " << infos.size()
<< " entries",
infos.size() == 1);
@@ -574,8 +569,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream() << "The donor shard did not return a UUID for collection " << nss.ns()
- << " as part of its listCollections response: "
- << entry
+ << " as part of its listCollections response: " << entry
<< ", but this node expects to see a UUID.",
!info["uuid"].eoo());
@@ -602,11 +596,9 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream()
- << "Cannot create collection "
- << nss.ns()
+ << "Cannot create collection " << nss.ns()
<< " because we already have an identically named collection with UUID "
- << collection->uuid()
- << ", which differs from the donor's UUID "
+ << collection->uuid() << ", which differs from the donor's UUID "
<< (donorUUID ? donorUUID->toString() : "(none)")
<< ". Manually drop the collection on this shard if it contains data from "
"a previous incarnation of "
@@ -622,10 +614,10 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
if (!indexSpecs.empty()) {
// Only allow indexes to be copied if the collection does not have any documents.
uassert(ErrorCodes::CannotCreateCollection,
- str::stream() << "aborting, shard is missing " << indexSpecs.size()
- << " indexes and "
- << "collection is not empty. Non-trivial "
- << "index creation should be scheduled manually",
+ str::stream()
+ << "aborting, shard is missing " << indexSpecs.size() << " indexes and "
+ << "collection is not empty. Non-trivial "
+ << "index creation should be scheduled manually",
collection->numRecords(opCtx) == 0);
}
return indexSpecs;
@@ -1152,10 +1144,9 @@ CollectionShardingRuntime::CleanupNotification MigrationDestinationManager::_not
if (!optMetadata || !(*optMetadata)->isSharded() ||
(*optMetadata)->getCollVersion().epoch() != _epoch) {
return Status{ErrorCodes::StaleShardVersion,
- str::stream() << "Not marking chunk " << redact(range.toString())
- << " as pending because the epoch of "
- << _nss.ns()
- << " changed"};
+ str::stream()
+ << "Not marking chunk " << redact(range.toString())
+ << " as pending because the epoch of " << _nss.ns() << " changed"};
}
// Start clearing any leftovers that would be in the new chunk
diff --git a/src/mongo/db/s/migration_session_id.cpp b/src/mongo/db/s/migration_session_id.cpp
index d2cfeab3254..7049a0870cf 100644
--- a/src/mongo/db/s/migration_session_id.cpp
+++ b/src/mongo/db/s/migration_session_id.cpp
@@ -53,8 +53,8 @@ MigrationSessionId MigrationSessionId::generate(StringData donor, StringData rec
invariant(!donor.empty());
invariant(!recipient.empty());
- return MigrationSessionId(str::stream() << donor << "_" << recipient << "_"
- << OID::gen().toString());
+ return MigrationSessionId(str::stream()
+ << donor << "_" << recipient << "_" << OID::gen().toString());
}
StatusWith<MigrationSessionId> MigrationSessionId::extractFromBSON(const BSONObj& obj) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index e292320ba53..022df3b0745 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -190,10 +190,8 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
uassert(ErrorCodes::StaleEpoch,
str::stream() << "cannot move chunk " << _args.toString()
<< " because collection may have been dropped. "
- << "current epoch: "
- << collectionVersion.epoch()
- << ", cmd epoch: "
- << _args.getVersionEpoch(),
+ << "current epoch: " << collectionVersion.epoch()
+ << ", cmd epoch: " << _args.getVersionEpoch(),
_args.getVersionEpoch() == collectionVersion.epoch());
ChunkType chunkToMove;
@@ -228,9 +226,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
"moveChunk.start",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if (logStatus != Status::OK()) {
return logStatus;
@@ -452,9 +448,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.validating",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if ((ErrorCodes::isInterruption(status.code()) ||
@@ -487,12 +481,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
}
fassert(40137,
- status.withContext(
- str::stream() << "Failed to commit migration for chunk " << _args.toString()
- << " due to "
- << redact(migrationCommitStatus)
- << ". Updating the optime with a write before refreshing the "
- << "metadata also failed"));
+ status.withContext(str::stream()
+ << "Failed to commit migration for chunk " << _args.toString()
+ << " due to " << redact(migrationCommitStatus)
+ << ". Updating the optime with a write before refreshing the "
+ << "metadata also failed"));
}
// Do a best effort attempt to incrementally refresh the metadata before leaving the critical
@@ -524,8 +517,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
return migrationCommitStatus.withContext(
str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
" migration commit due to '"
- << refreshStatus.toString()
- << "' after commit failed");
+ << refreshStatus.toString() << "' after commit failed");
}
const auto refreshedMetadata = _getCurrentMetadataAndCheckEpoch(opCtx);
@@ -569,10 +561,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.commit",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()
- << "counts"
+ << _args.getFromShardId() << "to" << _args.getToShardId() << "counts"
<< _recipientCloneCounts),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -632,9 +621,7 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
"moveChunk.error",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
try {
@@ -661,8 +648,7 @@ ScopedCollectionMetadata MigrationSourceManager::_getCurrentMetadataAndCheckEpoc
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "The collection was dropped or recreated since the migration began. "
- << "Expected collection epoch: "
- << _collectionEpoch.toString()
+ << "Expected collection epoch: " << _collectionEpoch.toString()
<< ", but found: "
<< (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
: "unsharded collection."),
@@ -684,9 +670,7 @@ void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
// The message expected by change streams
const auto o2Message = BSON("type"
<< "migrateChunkToNewShard"
- << "from"
- << _args.getFromShardId()
- << "to"
+ << "from" << _args.getFromShardId() << "to"
<< _args.getToShardId());
auto const serviceContext = opCtx->getClient()->getServiceContext();
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ac20cb2f350..a66109e73ba 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -45,7 +45,7 @@ const char kDestinationShard[] = "destination";
const char kIsDonorShard[] = "isDonorShard";
const char kChunk[] = "chunk";
const char kCollection[] = "collection";
-}
+} // namespace
BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const ShardId& fromShard,
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index dc2469d8602..67b59761477 100644
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -56,6 +56,6 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max);
-} // namespace shardutil
+} // namespace migrationutil
} // namespace mongo
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index dd62c984292..8fafb8c0253 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -149,8 +149,8 @@ public:
} catch (const std::exception& e) {
scopedMigration.signalComplete(
{ErrorCodes::InternalError,
- str::stream() << "Severe error occurred while running moveChunk command: "
- << e.what()});
+ str::stream()
+ << "Severe error occurred while running moveChunk command: " << e.what()});
throw;
}
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index aff155b3bb2..409bbb5d94c 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -300,8 +300,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
fassert(50762,
validateStatus.withContext(
str::stream() << "Failed to commit movePrimary for database " << getNss().ns()
- << " due to "
- << redact(commitStatus)
+ << " due to " << redact(commitStatus)
<< ". Updating the optime with a write before clearing the "
<< "version also failed"));
diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
index de61f5fbfd2..baea9099032 100644
--- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
+++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
@@ -37,7 +37,7 @@ namespace mongo {
* This class has a destructor that handles rerouting exceptions that might have occurred
* during an operation. For this reason, there should be only one instance of this object
* on the chain of one OperationContext.
-*/
+ */
class OperationContext;
class ScopedOperationCompletionShardingActions : public PolymorphicScoped {
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 1482462fce9..18e16428f63 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -89,10 +89,8 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
if (!lastResult.isPrePostImage) {
uassert(40628,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString()
- << " to not have "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
- << repl::OplogEntryBase::kPostImageOpTimeFieldName,
+ << " to not have " << repl::OplogEntryBase::kPreImageOpTimeFieldName
+ << " or " << repl::OplogEntryBase::kPostImageOpTimeFieldName,
!entry->getPreImageOpTime() && !entry->getPostImageOpTime());
return;
}
@@ -102,14 +100,11 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
uassert(40629,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString() << ": "
<< redact(entry->toBSON())
- << " to have session: "
- << lastResult.sessionId,
+ << " to have session: " << lastResult.sessionId,
lastResult.sessionId == entry->getSessionId());
uassert(40630,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString() << ": "
- << redact(entry->toBSON())
- << " to have txnNumber: "
- << lastResult.txnNum,
+ << redact(entry->toBSON()) << " to have txnNumber: " << lastResult.txnNum,
lastResult.txnNum == entry->getTxnNumber());
if (entry->getPreImageOpTime()) {
@@ -119,11 +114,8 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
} else {
uasserted(40631,
str::stream() << "expected oplog with opTime: " << entry->getOpTime().toString()
- << ": "
- << redact(entry->toBSON())
- << " to have either "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
+ << ": " << redact(entry->toBSON()) << " to have either "
+ << repl::OplogEntryBase::kPreImageOpTimeFieldName << " or "
<< repl::OplogEntryBase::kPostImageOpTimeFieldName);
}
}
@@ -142,20 +134,17 @@ repl::MutableOplogEntry parseOplog(const BSONObj& oplogBSON) {
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have sessionId: "
- << redact(oplogBSON),
+ << " does not have sessionId: " << redact(oplogBSON),
sessionInfo.getSessionId());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have txnNumber: "
- << redact(oplogBSON),
+ << " does not have txnNumber: " << redact(oplogBSON),
sessionInfo.getTxnNumber());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have stmtId: "
- << redact(oplogBSON),
+ << " does not have stmtId: " << redact(oplogBSON),
oplogEntry.getStatementId());
return oplogEntry;
@@ -225,9 +214,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
uassert(40632,
str::stream() << "Can't handle 2 pre/post image oplog in a row. Prevoius oplog "
<< lastResult.oplogTime.getTimestamp().toString()
- << ", oplog ts: "
- << oplogEntry.getTimestamp().toString()
- << ": "
+ << ", oplog ts: " << oplogEntry.getTimestamp().toString() << ": "
<< oplogBSON,
!lastResult.isPrePostImage);
}
@@ -295,9 +282,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
const auto& oplogOpTime = result.oplogTime;
uassert(40633,
str::stream() << "Failed to create new oplog entry for oplog with opTime: "
- << oplogEntry.getOpTime().toString()
- << ": "
- << redact(oplogBSON),
+ << oplogEntry.getOpTime().toString() << ": " << redact(oplogBSON),
!oplogOpTime.isNull());
// Do not call onWriteOpCompletedOnPrimary if we inserted a pre/post image, because the
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 94e052851ca..f645174986d 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -260,8 +260,9 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
// Skip the rest of the chain for this session since the ns is unrelated with the
// current one being migrated. It is ok to not check the rest of the chain because
// retryable writes don't allow touching different namespaces.
- if (!nextStmtId || (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
- nextOplog->getNss() != _ns)) {
+ if (!nextStmtId ||
+ (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
+ nextOplog->getNss() != _ns)) {
_currentOplogIterator.reset();
return false;
}
@@ -420,8 +421,7 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte
uassert(40656,
str::stream() << "rollback detected, rollbackId was " << _initialRollbackId
- << " but is now "
- << rollbackId,
+ << " but is now " << rollbackId,
rollbackId == _initialRollbackId);
// If the rollbackId hasn't changed, and this record corresponds to a retryable write,
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index dd03e31b206..10564146ca4 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -164,8 +164,7 @@ public:
const auto storedShardName = shardingState->shardId().toString();
uassert(ErrorCodes::BadValue,
str::stream() << "received shardName " << shardName
- << " which differs from stored shardName "
- << storedShardName,
+ << " which differs from stored shardName " << storedShardName,
storedShardName == shardName);
// Validate config connection string parameter.
@@ -184,8 +183,7 @@ public:
Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Given config server set name: " << givenConnStr.getSetName()
- << " differs from known set name: "
- << storedConnStr.getSetName(),
+ << " differs from known set name: " << storedConnStr.getSetName(),
givenConnStr.getSetName() == storedConnStr.getSetName());
// Validate namespace parameter.
@@ -366,11 +364,11 @@ public:
if (!status.isOK()) {
// The reload itself was interrupted or confused here
- errmsg = str::stream() << "could not refresh metadata for " << nss.ns()
- << " with requested shard version "
- << requestedVersion.toString()
- << ", stored shard version is " << currVersion.toString()
- << causedBy(redact(status));
+ errmsg = str::stream()
+ << "could not refresh metadata for " << nss.ns()
+ << " with requested shard version " << requestedVersion.toString()
+ << ", stored shard version is " << currVersion.toString()
+ << causedBy(redact(status));
warning() << errmsg;
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index a056fcd3232..fef5707c039 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -113,9 +113,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -133,8 +131,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 86bf071f3ac..110cecee0bb 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -103,8 +103,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
maxCollVersion.incMajor();
BSONObj shardChunk =
BSON(ChunkType::minShardID(mins[i])
- << ChunkType::max(maxs[i])
- << ChunkType::shard(kShardId.toString())
+ << ChunkType::max(maxs[i]) << ChunkType::shard(kShardId.toString())
<< ChunkType::lastmod(Date_t::fromMillisSinceEpoch(maxCollVersion.toLong())));
chunks.push_back(
@@ -144,8 +143,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
try {
DBDirectClient client(operationContext());
for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID() << chunk.getMin() << ChunkType::max()
- << chunk.getMax()));
+ Query query(BSON(ChunkType::minShardID()
+ << chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index d278e8acba3..c889866bfd1 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -161,9 +161,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
}
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read persisted collections entry for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithCollection.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithCollection.getStatus().toString()
<< "'.",
statusWithCollection.isOK());
@@ -176,9 +174,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
statusWithCollection.getValue().getEpoch());
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read highest version persisted chunk for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithChunk.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithChunk.getStatus().toString()
<< "'.",
statusWithChunk.isOK());
@@ -265,8 +261,8 @@ StatusWith<CollectionAndChangedChunks> getIncompletePersistedMetadataSinceVersio
return CollectionAndChangedChunks();
}
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to load local metadata due to '" << status.toString()
- << "'.");
+ str::stream()
+ << "Failed to load local metadata due to '" << status.toString() << "'.");
}
}
@@ -454,8 +450,8 @@ void ShardServerCatalogCacheLoader::getDatabase(
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
- _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary, term ](
- auto status) noexcept {
+ _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary,
+ term ](auto status) noexcept {
invariant(status);
auto context = _contexts.makeOperationContext(*Client::getCurrent());
@@ -628,19 +624,18 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}();
auto remoteRefreshFn = [this, nss, catalogCacheSinceVersion, maxLoaderVersion, termScheduled](
- OperationContext* opCtx,
- StatusWith<CollectionAndChangedChunks>
- swCollectionAndChangedChunks) -> StatusWith<CollectionAndChangedChunks> {
-
+ OperationContext* opCtx,
+ StatusWith<CollectionAndChangedChunks> swCollectionAndChangedChunks)
+ -> StatusWith<CollectionAndChangedChunks> {
if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleCollAndChunksTask(
opCtx,
nss,
collAndChunkTask{swCollectionAndChangedChunks, maxLoaderVersion, termScheduled});
- LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
- << " from version " << maxLoaderVersion
- << " and no metadata was found.";
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader remotely refreshed for collection " << nss << " from version "
+ << maxLoaderVersion << " and no metadata was found.";
return swCollectionAndChangedChunks;
}
@@ -651,12 +646,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
auto& collAndChunks = swCollectionAndChangedChunks.getValue();
if (collAndChunks.changedChunks.back().getVersion().epoch() != collAndChunks.epoch) {
- return Status{
- ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Invalid chunks found when reloading '" << nss.toString()
+ return Status{ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Invalid chunks found when reloading '" << nss.toString()
<< "' Previous collection epoch was '"
- << collAndChunks.epoch.toString()
- << "', but found a new epoch '"
+ << collAndChunks.epoch.toString() << "', but found a new epoch '"
<< collAndChunks.changedChunks.back().getVersion().epoch().toString()
<< "'. Collection was dropped and recreated."};
}
@@ -733,8 +727,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
StringData dbName,
long long termScheduled,
std::function<void(OperationContext*, StatusWith<DatabaseType>)> callbackFn) {
- auto remoteRefreshFn = [ this, name = dbName.toString(), termScheduled ](
- OperationContext * opCtx, StatusWith<DatabaseType> swDatabaseType) {
+ auto remoteRefreshFn = [this, name = dbName.toString(), termScheduled](
+ OperationContext* opCtx, StatusWith<DatabaseType> swDatabaseType) {
if (swDatabaseType == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleDbTask(
opCtx, name, DBTask{swDatabaseType, termScheduled});
@@ -794,11 +788,12 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
: ("enqueued metadata from " +
enqueued.changedChunks.front().getVersion().toString() + " to " +
enqueued.changedChunks.back().getVersion().toString()))
- << " and " << (persisted.changedChunks.empty()
- ? "no persisted metadata"
- : ("persisted metadata from " +
- persisted.changedChunks.front().getVersion().toString() + " to " +
- persisted.changedChunks.back().getVersion().toString()))
+ << " and "
+ << (persisted.changedChunks.empty()
+ ? "no persisted metadata"
+ : ("persisted metadata from " +
+ persisted.changedChunks.front().getVersion().toString() + " to " +
+ persisted.changedChunks.back().getVersion().toString()))
<< ", GTE cache version " << catalogCacheSinceVersion;
if (!tasksAreEnqueued) {
@@ -909,7 +904,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
return;
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
invariant(status);
_runDbTasks(name);
@@ -996,7 +991,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
LOG(0) << "Cache loader failed to schedule a persisted metadata update"
<< " task for namespace '" << name << "' due to '" << redact(status)
@@ -1043,12 +1038,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
uassertStatusOKWithContext(
persistCollectionAndChangedChunks(opCtx, nss, *task.collectionAndChangedChunks),
str::stream() << "Failed to update the persisted chunk metadata for collection '"
- << nss.ns()
- << "' from '"
- << task.minQueryVersion.toString()
- << "' to '"
- << task.maxQueryVersion.toString()
- << "'. Will be retried.");
+ << nss.ns() << "' from '" << task.minQueryVersion.toString() << "' to '"
+ << task.maxQueryVersion.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted chunk metadata for collection '"
<< nss << "' from '" << task.minQueryVersion
@@ -1074,15 +1065,13 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
// The database was dropped. The persisted metadata for the collection must be cleared.
uassertStatusOKWithContext(deleteDatabasesEntry(opCtx, dbName),
str::stream() << "Failed to clear persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
return;
}
uassertStatusOKWithContext(persistDbVersion(opCtx, *task.dbType),
str::stream() << "Failed to update the persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted metadata for db "
<< dbName.toString();
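The schedule() call sites above show the new formatting for lambdas with init-captures: `[ this, name = dbName.toString() ]` becomes `[this, name = dbName.toString()]`. A minimal sketch of the underlying pattern, with a toy pool standing in for mongo's ThreadPool (one thread per task here; the real pool queues onto a fixed set of workers):

#include <functional>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

// Toy pool: one thread per task, all joined at destruction.
class MiniPool {
public:
    void schedule(std::function<void()> task) {
        _threads.emplace_back(std::move(task));
    }
    ~MiniPool() {
        for (auto& t : _threads)
            t.join();
    }

private:
    std::vector<std::thread> _threads;
};

int main() {
    std::string dbName = "config";
    MiniPool pool;
    // Init-captures copy exactly what the task needs, so it stays valid even
    // if the caller's locals are gone by the time the task runs.
    pool.schedule([name = dbName, term = 3] {
        std::cout << "refreshing db " << name << " for term " << term << "\n";
    });
    return 0;  // pool destructor joins the worker before main exits
}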
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index aa1ec89d5ec..9c58f262692 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -59,8 +59,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
/**
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index c3ca8877773..a1a40e20392 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -97,36 +97,36 @@ public:
// Update the shard identity config string
void onConfirmedSet(const State& state) final {
- Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor()->schedule([
- serviceContext = _serviceContext,
- connStr = state.connStr
- ](Status status) {
- if (ErrorCodes::isCancelationError(status.code())) {
- LOG(2) << "Unable to schedule confirmed set update due to " << status;
- return;
- }
- uassertStatusOK(status);
-
- LOG(0) << "Updating config server with confirmed set " << connStr;
- Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
-
- if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
- return;
- }
-
- auto configsvrConnStr =
- Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
-
- // Only proceed if the notification is for the configsvr
- if (configsvrConnStr.getSetName() != connStr.getSetName()) {
- return;
- }
-
- ThreadClient tc("updateShardIdentityConfigString", serviceContext);
- auto opCtx = tc->makeOperationContext();
-
- ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
- });
+ Grid::get(_serviceContext)
+ ->getExecutorPool()
+ ->getFixedExecutor()
+ ->schedule([serviceContext = _serviceContext, connStr = state.connStr](Status status) {
+ if (ErrorCodes::isCancelationError(status.code())) {
+ LOG(2) << "Unable to schedule confirmed set update due to " << status;
+ return;
+ }
+ uassertStatusOK(status);
+
+ LOG(0) << "Updating config server with confirmed set " << connStr;
+ Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
+
+ if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
+ return;
+ }
+
+ auto configsvrConnStr =
+ Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
+
+ // Only proceed if the notification is for the configsvr
+ if (configsvrConnStr.getSetName() != connStr.getSetName()) {
+ return;
+ }
+
+ ThreadClient tc("updateShardIdentityConfigString", serviceContext);
+ auto opCtx = tc->makeOperationContext();
+
+ ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
+ });
}
void onPossibleSet(const State& state) final {
Grid::get(_serviceContext)->shardRegistry()->updateReplSetHosts(state.connStr);
@@ -373,12 +373,14 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
auto targeterFactoryPtr = targeterFactory.get();
ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
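The last hunk reflows ShardFactory::BuilderCallable lambdas whose parameter lists no longer fit beside the capture. As a sketch of the idiom, here is a factory callable that builds a shard handle per (id, connection string) pair; the Shard struct and the counter standing in for the targeter factory are illustrative:

#include <functional>
#include <iostream>
#include <memory>
#include <string>

// Illustrative shard handle; the real builder wires up ShardRemote plus a
// targeter created per connection string.
struct Shard {
    std::string id;
    std::string connStr;
};

using BuilderCallable =
    std::function<std::unique_ptr<Shard>(const std::string&, const std::string&)>;

int main() {
    int targetersCreated = 0;  // stands in for targeterFactoryPtr->create(connStr)
    BuilderCallable setBuilder = [&targetersCreated](const std::string& shardId,
                                                     const std::string& connStr) {
        ++targetersCreated;
        return std::make_unique<Shard>(Shard{shardId, connStr});
    };

    auto shard = setBuilder("shard0", "rs0/a:27017,b:27017");
    std::cout << shard->id << " -> " << shard->connStr << " (targeters: " << targetersCreated
              << ")\n";
    return 0;
}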
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index 21bbe8553ff..5a7a7868259 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -183,18 +183,19 @@ TEST_F(ShardingInitializationMongoDTest, InitWhilePreviouslyInErrorStateWillStay
shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
+ shardingInitialization()->setGlobalInitMethodForTest([](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) {
uasserted(ErrorCodes::ShutdownInProgress, "Not an actual shutdown");
});
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity);
// ShardingState is now in error state, attempting to call it again will still result in error.
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
ASSERT_THROWS_CODE(
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity),
@@ -223,10 +224,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingShardIdentit
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -256,10 +257,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameS
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -291,13 +292,9 @@ TEST_F(ShardingInitializationMongoDTest,
storageGlobalParams.readOnly = true;
serverGlobalParams.overrideShardIdentity =
BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
- << OID::gen()
- << ShardIdentity::kConfigsvrConnectionStringFieldName
- << "invalid");
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName << kShardName
+ << ShardIdentity::kClusterIdFieldName << OID::gen()
+ << ShardIdentity::kConfigsvrConnectionStringFieldName << "invalid");
ASSERT_THROWS_CODE(
shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()),
@@ -436,10 +433,8 @@ TEST_F(ShardingInitializationMongoDTest,
ScopedSetStandaloneMode standalone(getServiceContext());
BSONObj invalidShardIdentity = BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName
+ << kShardName << ShardIdentity::kClusterIdFieldName
<< OID::gen()
<< ShardIdentity::kConfigsvrConnectionStringFieldName
<< "invalid");
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 3529a42cfbd..c3d07903ceb 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -121,10 +121,10 @@ Status ShardingLogging::_log(OperationContext* opCtx,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
Date_t now = Grid::get(opCtx)->getNetwork()->now();
- const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
- << ":" << serverGlobalParams.port;
- const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
- << OID::gen();
+ const std::string serverName = str::stream()
+ << Grid::get(opCtx)->getNetwork()->getHostName() << ":" << serverGlobalParams.port;
+ const std::string changeId = str::stream()
+ << serverName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
@@ -162,9 +162,9 @@ Status ShardingLogging::_createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
- BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj createCmd =
+ BSON("create" << collName << "capped" << true << "size" << cappedSize
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto result =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index e59ed3568f7..e229badedbc 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -124,8 +124,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -229,9 +228,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -249,8 +246,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -336,9 +332,7 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
BSONElement tagMaxKeyElement = tagMaxFields.next();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the min and max of the existing zone " << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
- << " have non-matching keys",
+ << " -->> " << tag.getMaxKey() << " have non-matching keys",
tagMinKeyElement.fieldNameStringData() ==
tagMaxKeyElement.fieldNameStringData());
@@ -350,20 +344,15 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the proposed shard key " << proposedKey.toString()
<< " does not match with the shard key of the existing zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey(),
+ << tag.getMinKey() << " -->> " << tag.getMaxKey(),
match);
if (ShardKeyPattern::isHashedPatternEl(proposedKeyElement) &&
(tagMinKeyElement.type() != NumberLong || tagMaxKeyElement.type() != NumberLong)) {
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "cannot do hash sharding with the proposed key "
- << proposedKey.toString()
- << " because there exists a zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
+ << proposedKey.toString() << " because there exists a zone "
+ << tag.getMinKey() << " -->> " << tag.getMaxKey()
<< " whose boundaries are not "
"of type NumberLong");
}
@@ -418,8 +407,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx, const Nam
uassert(ErrorCodes::InternalError,
str::stream() << "expected to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -503,8 +491,7 @@ ShardCollectionTargetState calculateTargetState(OperationContext* opCtx,
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
@@ -704,17 +691,21 @@ UUID shardCollection(OperationContext* opCtx,
InitialSplitPolicy::ShardCollectionConfig initialChunks;
boost::optional<ShardCollectionTargetState> targetState;
- auto writeChunkDocumentsAndRefreshShards = [&](
- const ShardCollectionTargetState& targetState,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
- // Insert chunk documents to config.chunks on the config server.
- writeFirstChunksToConfig(opCtx, initialChunks);
-
- updateShardingCatalogEntryForCollection(
- opCtx, nss, targetState, initialChunks, *request.getCollation(), request.getUnique());
-
- refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
- };
+ auto writeChunkDocumentsAndRefreshShards =
+ [&](const ShardCollectionTargetState& targetState,
+ const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
+ // Insert chunk documents to config.chunks on the config server.
+ writeFirstChunksToConfig(opCtx, initialChunks);
+
+ updateShardingCatalogEntryForCollection(opCtx,
+ nss,
+ targetState,
+ initialChunks,
+ *request.getCollation(),
+ request.getUnique());
+
+ refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
+ };
{
// From this point onward the collection can only be read, not written to, so it is safe to
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index be2560efcee..ec8d3e9b530 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -137,15 +137,14 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
//
// TODO(SERVER-25086): Remove distLock acquisition from split chunk
//
- const std::string whyMessage(
- str::stream() << "splitting chunk " << chunkRange.toString() << " in " << nss.toString());
+ const std::string whyMessage(str::stream() << "splitting chunk " << chunkRange.toString()
+ << " in " << nss.toString());
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus().withContext(
str::stream() << "could not acquire collection lock for " << nss.toString()
- << " to split chunk "
- << chunkRange.toString());
+ << " to split chunk " << chunkRange.toString());
}
// If the shard key is hashed, then we must make sure that the split points are of type
@@ -157,12 +156,11 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
BSONElement splitKeyElement = it.next();
if (splitKeyElement.type() != NumberLong) {
return {ErrorCodes::CannotSplit,
- str::stream() << "splitChunk cannot split chunk "
- << chunkRange.toString()
- << ", split point "
- << splitKeyElement.toString()
- << " must be of type "
- "NumberLong for hashed shard key patterns"};
+ str::stream()
+ << "splitChunk cannot split chunk " << chunkRange.toString()
+ << ", split point " << splitKeyElement.toString()
+ << " must be of type "
+ "NumberLong for hashed shard key patterns"};
}
}
}
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index eaf91e54c06..76c51e89ca6 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -291,13 +291,13 @@ TransactionCoordinator::TransactionCoordinator(ServiceContext* serviceContext,
return txn::deleteCoordinatorDoc(*_scheduler, _lsid, _txnNumber);
})
- .onCompletion([ this, deadlineFuture = std::move(deadlineFuture) ](Status s) mutable {
+ .onCompletion([this, deadlineFuture = std::move(deadlineFuture)](Status s) mutable {
// Interrupt this coordinator's scheduler hierarchy and join the deadline task's future
// in order to guarantee that there are no more threads running within the coordinator.
_scheduler->shutdown(
{ErrorCodes::TransactionCoordinatorDeadlineTaskCanceled, "Coordinator completed"});
- return std::move(deadlineFuture).onCompletion([ this, s = std::move(s) ](Status) {
+ return std::move(deadlineFuture).onCompletion([this, s = std::move(s)](Status) {
// Notify all the listeners which are interested in the coordinator's lifecycle.
// After this call, the coordinator object could potentially get destroyed by its
// lifetime controller, so there shouldn't be any accesses to `this` after this
@@ -373,8 +373,7 @@ void TransactionCoordinator::_done(Status status) {
if (status == ErrorCodes::TransactionCoordinatorSteppingDown)
status = Status(ErrorCodes::InterruptedDueToReplStateChange,
str::stream() << "Coordinator " << _lsid.getId() << ':' << _txnNumber
- << " stopped due to: "
- << status.reason());
+ << " stopped due to: " << status.reason());
LOG(3) << "Two-phase commit for " << _lsid.getId() << ':' << _txnNumber << " completed with "
<< redact(status);
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index b45b4449838..6fa5d45226e 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -61,8 +61,8 @@ void TransactionCoordinatorCatalog::onStepDown() {
stdx::unique_lock<stdx::mutex> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
- for (auto && [ sessionId, coordinatorsForSession ] : _coordinatorsBySession) {
- for (auto && [ txnNumber, coordinator ] : coordinatorsForSession) {
+ for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
+ for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
coordinatorsToCancel.emplace_back(coordinator);
}
}
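The onStepDown() hunk tightens spacing around C++17 structured bindings; the idiom itself destructures each entry of the nested session -> txnNumber -> coordinator map. A self-contained sketch with std types (the shared_ptr<string> value is a placeholder for TransactionCoordinator):

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

int main() {
    // Shape mirrors _coordinatorsBySession: session id -> (txnNumber -> coordinator).
    std::map<std::string, std::map<long long, std::shared_ptr<std::string>>> bySession{
        {"lsid-1", {{1, std::make_shared<std::string>("coord-a")}}},
        {"lsid-2", {{7, std::make_shared<std::string>("coord-b")}}},
    };

    // Structured bindings name both halves of each map entry in place.
    std::vector<std::shared_ptr<std::string>> toCancel;
    for (auto&& [sessionId, forSession] : bySession) {
        for (auto&& [txnNumber, coordinator] : forSession) {
            std::cout << sessionId << ":" << txnNumber << " -> " << *coordinator << "\n";
            toCancel.emplace_back(coordinator);
        }
    }
    std::cout << "collected " << toCancel.size() << " coordinator(s)\n";
    return 0;
}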
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index c27e4c21eee..79128137b6b 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -83,8 +83,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
- opCtx) {
+ return scheduleWork([this, shardId, commandObj = commandObj.getOwned()](
+ OperationContext* opCtx) {
// Note: This internal authorization is tied to the lifetime of the client, which will
// be destroyed by 'scheduleWork' immediately after this lambda ends
AuthorizationSession::get(opCtx->getClient())
@@ -114,8 +114,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
}
return _targetHostAsync(shardId, readPref)
- .then([ this, shardId, commandObj = commandObj.getOwned(), readPref ](
- HostAndShard hostAndShard) mutable {
+ .then([this, shardId, commandObj = commandObj.getOwned(), readPref](
+ HostAndShard hostAndShard) mutable {
executor::RemoteCommandRequest request(hostAndShard.hostTargeted,
NamespaceString::kAdminDb.toString(),
commandObj,
@@ -166,7 +166,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusWith<ResponseStatus> s) {
+ [this, it = std::move(it)](StatusWith<ResponseStatus> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
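Several of these continuations move a std::list iterator into the capture so that, when the future settles, the matching entry can be erased from the active-handles list under the mutex. A condensed synchronous analogue (onComplete stands in for the tapAll continuation):

#include <iostream>
#include <list>
#include <mutex>
#include <string>

int main() {
    std::mutex mtx;
    std::list<std::string> activeHandles{"handle-a", "handle-b"};
    auto it = activeHandles.begin();

    // Moving the iterator into the capture ties the bookkeeping to the
    // continuation itself; std::list iterators stay valid across other erasures.
    auto onComplete = [&mtx, &activeHandles, it = std::move(it)](bool /*status*/) {
        std::lock_guard<std::mutex> lg(mtx);
        activeHandles.erase(it);
    };

    onComplete(true);  // in the real code, the future's tapAll invokes this
    std::cout << activeHandles.size() << " handle(s) still active\n";  // prints 1
    return 0;
}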
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index 1c654d8707f..7aef1fc8e78 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -115,7 +115,7 @@ public:
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusOrStatusWith<ReturnType> s) {
+ [this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
@@ -284,7 +284,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
combiner(std::move(combiner)) {}
/*****************************************************
* The first few fields have fixed values. *
- ******************************************************/
+ ******************************************************/
// Protects all state in the SharedBlock.
stdx::mutex mutex;
@@ -299,7 +299,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
/*****************************************************
* The below have initial values based on user input.*
- ******************************************************/
+ ******************************************************/
// The number of input futures that have not yet been resolved and processed.
size_t numOutstandingResponses;
// The variable where the intermediate results and final result is stored.
@@ -374,26 +374,25 @@ Future<FutureContinuationResult<LoopBodyFn>> doWhile(AsyncWorkScheduler& schedul
LoopBodyFn&& f) {
using ReturnType = typename decltype(f())::value_type;
auto future = f();
- return std::move(future).onCompletion([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
- f = std::forward<LoopBodyFn>(f)
- ](StatusOrStatusWith<ReturnType> s) mutable {
- if (!shouldRetryFn(s))
- return Future<ReturnType>(std::move(s));
-
- // Retry after a delay.
- const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
- return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {}).then([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::move(shouldRetryFn),
- f = std::move(f)
- ]() mutable {
- return doWhile(scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ return std::move(future).onCompletion(
+ [&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
+ f = std::forward<LoopBodyFn>(f)](StatusOrStatusWith<ReturnType> s) mutable {
+ if (!shouldRetryFn(s))
+ return Future<ReturnType>(std::move(s));
+
+ // Retry after a delay.
+ const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
+ return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {})
+ .then([&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::move(shouldRetryFn),
+ f = std::move(f)]() mutable {
+ return doWhile(
+ scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ });
});
- });
}
} // namespace txn
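doWhile() above is a future-based retry loop: run the body, ask shouldRetryFn whether to go again, and reschedule after an optional backoff delay. A blocking sketch of the same control flow, with a doubling delay standing in for Backoff::nextSleep() (the real version chains futures on an AsyncWorkScheduler instead of sleeping on the caller's thread):

#include <chrono>
#include <iostream>
#include <thread>

template <typename BodyFn, typename ShouldRetryFn>
auto retryWhile(BodyFn body, ShouldRetryFn shouldRetry, std::chrono::milliseconds delay)
    -> decltype(body()) {
    while (true) {
        auto result = body();
        if (!shouldRetry(result))
            return result;
        std::this_thread::sleep_for(delay);
        delay *= 2;  // doubling stand-in for Backoff::nextSleep()
    }
}

int main() {
    int attempts = 0;
    int result = retryWhile([&] { return ++attempts; },
                            [](int r) { return r < 3; },  // retry until the third attempt
                            std::chrono::milliseconds(10));
    std::cout << "finished after " << result << " attempts\n";
    return 0;
}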
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index f2054c59f62..fb145b325aa 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -359,7 +359,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkSucceeds) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
return future.get(opCtx);
});
@@ -377,7 +377,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkThrowsException) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
future.get(opCtx);
uasserted(ErrorCodes::InternalError, "Test error");
@@ -396,7 +396,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkInSucceeds) {
auto pf = makePromiseFuture<int>();
auto future = async.scheduleWorkIn(
Milliseconds{10},
- [future = std::move(pf.future)](OperationContext * opCtx) { return future.get(opCtx); });
+ [future = std::move(pf.future)](OperationContext* opCtx) { return future.get(opCtx); });
pf.promise.emplaceValue(5);
ASSERT(!future.isReady());
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index dac4caee608..6be674d1ad7 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -147,7 +147,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
_catalogAndScheduler->scheduler
.scheduleWorkIn(
recoveryDelayForTesting,
- [catalogAndScheduler = _catalogAndScheduler](OperationContext * opCtx) {
+ [catalogAndScheduler = _catalogAndScheduler](OperationContext* opCtx) {
auto& replClientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
replClientInfo.setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
index f29b442559b..df1d3cc2ade 100644
--- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
@@ -44,8 +44,7 @@ TEST(CoordinatorCommitDecisionTest, SerializeCommitHasTimestampAndNoAbortStatus)
ASSERT_BSONOBJ_EQ(BSON("decision"
<< "commit"
- << "commitTimestamp"
- << Timestamp(100, 200)),
+ << "commitTimestamp" << Timestamp(100, 200)),
obj);
}
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index 7e88a292067..ebcd839b2ab 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -192,8 +192,7 @@ auto makeDummyPrepareCommand(const LogicalSessionId& lsid, const TxnNumber& txnN
prepareCmd.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareCmd.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
return prepareObj;
@@ -546,17 +545,23 @@ protected:
TxnNumber txnNumber,
const std::vector<ShardId>& participants,
const boost::optional<Timestamp>& commitTimestamp) {
- txn::persistDecision(*_aws, lsid, txnNumber, participants, [&] {
- txn::CoordinatorCommitDecision decision;
- if (commitTimestamp) {
- decision.setDecision(txn::CommitDecision::kCommit);
- decision.setCommitTimestamp(commitTimestamp);
- } else {
- decision.setDecision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort status"));
- }
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ lsid,
+ txnNumber,
+ participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision;
+ if (commitTimestamp) {
+ decision.setDecision(txn::CommitDecision::kCommit);
+ decision.setCommitTimestamp(commitTimestamp);
+ } else {
+ decision.setDecision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction,
+ "Test abort status"));
+ }
+ return decision;
+ }())
+ .get();
auto allCoordinatorDocs = txn::readAllCoordinatorDocs(opCtx);
ASSERT_EQUALS(allCoordinatorDocs.size(), size_t(1));
@@ -733,11 +738,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest,
// Delete the document for the first transaction and check that only the second transaction's
// document still exists.
- txn::persistDecision(*_aws, _lsid, txnNumber1, _participants, [&] {
- txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ _lsid,
+ txnNumber1,
+ _participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(
+ Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
+ return decision;
+ }())
+ .get();
txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumber1).get();
allCoordinatorDocs = txn::readAllCoordinatorDocs(operationContext());
@@ -1466,8 +1477,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForParticipantListWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
coordinator.runCommit(kTwoShardIdList);
waitUntilCoordinatorDocIsPresent();
@@ -1511,8 +1521,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForDecisionWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second prepare request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertPrepareSentAndRespondWithSuccess();
@@ -1562,8 +1571,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangAfterDeletingCoordinatorDoc",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second commit request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertCommitSentAndRespondWithSuccess();
@@ -2122,11 +2130,10 @@ TEST_F(TransactionCoordinatorMetricsTest, SlowLogLineIncludesTransactionParamete
runSimpleTwoPhaseCommitWithCommitDecisionAndCaptureLogLines();
BSONObjBuilder lsidBob;
_lsid.serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << _txnNumber));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(str::stream()
+ << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << _txnNumber));
}
TEST_F(TransactionCoordinatorMetricsTest,
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index f49da0ac61f..dbffc60de1d 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -126,8 +126,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
BSONObj sameParticipantList =
BSON("$and" << buildParticipantListMatchesConditions(participantList));
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$or"
+ << sessionInfo.toBSON() << "$or"
<< BSON_ARRAY(noParticipantList << sameParticipantList)));
// Update with participant list.
@@ -154,13 +153,9 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
- << buildParticipantListString(participantList)
- << " for "
- << lsid.getId()
- << ':'
- << txnNumber
- << ", found document with a different participant list: "
- << doc);
+ << buildParticipantListString(participantList) << " for "
+ << lsid.getId() << ':' << txnNumber
+ << ", found document with a different participant list: " << doc);
}
// Throw any other error.
@@ -223,8 +218,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
prepareTransaction.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<PrepareResponse>> responses;
@@ -245,7 +239,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
// Initial value
PrepareVoteConsensus{int(participants.size())},
// Aggregates an incoming response (next) with the existing aggregate value (result)
- [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus & result,
+ [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus& result,
const PrepareResponse& next) {
result.registerVote(next);
@@ -300,10 +294,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
BSON(TransactionCoordinatorDocument::kDecisionFieldName << decision.toBSON());
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$and"
- << buildParticipantListMatchesConditions(participantList)
- << "$or"
+ << sessionInfo.toBSON() << "$and"
+ << buildParticipantListMatchesConditions(participantList) << "$or"
<< BSON_ARRAY(noDecision << sameDecision)));
entry.setU([&] {
@@ -333,11 +325,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
- << (isCommit ? "'commit'" : "'abort'")
- << " for"
- << lsid.getId()
- << ':'
- << txnNumber
+ << (isCommit ? "'commit'" : "'abort'") << " for" << lsid.getId()
+ << ':' << txnNumber
<< ", either failed to find document for this lsid:txnNumber or "
"document existed with a different participant list, decision "
"or commitTimestamp: "
@@ -379,8 +368,7 @@ Future<void> sendCommit(ServiceContext* service,
commitTransaction.setCommitTimestamp(commitTimestamp);
auto commitObj = commitTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -398,8 +386,7 @@ Future<void> sendAbort(ServiceContext* service,
abortTransaction.setDbName(NamespaceString::kAdminDb);
auto abortObj = abortTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -529,12 +516,12 @@ Future<PrepareResponse> sendPrepareToShard(ServiceContext* service,
swPrepareResponse != ErrorCodes::TransactionCoordinatorSteppingDown &&
swPrepareResponse != ErrorCodes::TransactionCoordinatorReachedAbortDecision;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? " local " : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
@@ -621,12 +608,12 @@ Future<void> sendDecisionToShard(ServiceContext* service,
// coordinator-specific code.
return !s.isOK() && s != ErrorCodes::TransactionCoordinatorSteppingDown;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? "local" : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
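persistDecisionBlocking() guards its update with a query meaning "no decision yet, or the same decision already written", so a re-run is idempotent while a conflicting decision trips the uasserted(51026, ...) path above. A toy in-memory version of that guard (the document shape is illustrative):

#include <iostream>
#include <optional>
#include <string>

struct CoordinatorDoc {
    std::optional<std::string> decision;  // "commit" or "abort"
};

// Mirrors the "$or": [noDecision, sameDecision] condition: write only if no
// decision exists or the persisted one is identical.
bool tryPersistDecision(CoordinatorDoc& doc, const std::string& decision) {
    if (doc.decision && *doc.decision != decision)
        return false;  // conflicting decision already durable; caller uasserts
    doc.decision = decision;
    return true;
}

int main() {
    CoordinatorDoc doc;
    std::cout << tryPersistDecision(doc, "commit") << "\n";  // 1: first write wins
    std::cout << tryPersistDecision(doc, "commit") << "\n";  // 1: idempotent re-write
    std::cout << tryPersistDecision(doc, "abort") << "\n";   // 0: conflicting decision
    return 0;
}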
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index 3cb6b8c1cbe..b48811ec994 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -132,12 +132,11 @@ public:
replClient.setLastOp(opCtx, prepareOpTime);
}
- invariant(opCtx->recoveryUnit()->getPrepareTimestamp() ==
- prepareOpTime.getTimestamp(),
- str::stream() << "recovery unit prepareTimestamp: "
- << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
- << " participant prepareOpTime: "
- << prepareOpTime.toString());
+ invariant(
+ opCtx->recoveryUnit()->getPrepareTimestamp() == prepareOpTime.getTimestamp(),
+ str::stream() << "recovery unit prepareTimestamp: "
+ << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
+ << " participant prepareOpTime: " << prepareOpTime.toString());
if (MONGO_FAIL_POINT(
participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 56c2ca059de..b4999f5c6eb 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -46,9 +46,7 @@ TEST(ShardIdentityType, RoundTrip) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId
- << "configsvrConnectionString"
+ << "clusterId" << clusterId << "configsvrConnectionString"
<< "test/a:123");
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
@@ -67,8 +65,7 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,8 +76,7 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -91,8 +87,7 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -118,8 +113,7 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -133,8 +127,7 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -147,5 +140,5 @@ TEST(ShardIdentityType, CreateUpdateObject) {
ASSERT_BSONOBJ_EQ(expectedObj, updateObj);
}
+} // namespace
} // namespace mongo
-} // unnamed namespace
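The test hunks above all rebuild documents with the BSON() macro, whose call sites read as alternating key << value pairs. A very small stand-in showing why that shape parses: an appender that flips between expecting a key and expecting a value (the real BSONObjBuilder is typed and far richer; this handles string values only):

#include <iostream>
#include <map>
#include <string>

class DocBuilder {
public:
    DocBuilder& operator<<(const std::string& s) {
        if (_expectKey) {
            _key = s;
            _expectKey = false;
        } else {
            _fields[_key] = s;
            _expectKey = true;
        }
        return *this;
    }
    void print() const {
        for (const auto& kv : _fields)
            std::cout << kv.first << ": " << kv.second << "\n";
    }

private:
    bool _expectKey = true;
    std::string _key;
    std::map<std::string, std::string> _fields;
};

int main() {
    DocBuilder doc;
    doc << "_id" << "shardIdentity"
        << "shardName" << "s1"
        << "configsvrConnectionString" << "test/a:123";
    doc.print();
    return 0;
}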
diff --git a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
index 1ff67ff3257..d1ceaaeeba6 100644
--- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
+++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
@@ -90,5 +90,5 @@ MONGO_INITIALIZER(RegisterWaitForOngoingChunkSplitsCommand)(InitializerContext*
}
return Status::OK();
}
-}
-}
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 08a944fec1d..73f49e8e150 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -130,23 +130,23 @@ struct ServerGlobalParams {
enum ClusterAuthModes {
ClusterAuthMode_undefined,
/**
- * Authenticate using keyfile, accept only keyfiles
- */
+ * Authenticate using keyfile, accept only keyfiles
+ */
ClusterAuthMode_keyFile,
/**
- * Authenticate using keyfile, accept both keyfiles and X.509
- */
+ * Authenticate using keyfile, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendKeyFile,
/**
- * Authenticate using X.509, accept both keyfiles and X.509
- */
+ * Authenticate using X.509, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendX509,
/**
- * Authenticate using X.509, accept only X.509
- */
+ * Authenticate using X.509, accept only X.509
+ */
ClusterAuthMode_x509
};
@@ -271,4 +271,4 @@ struct TraitNamedDomain {
return ret;
}
};
-}
+} // namespace mongo
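Read together, the re-indented comments describe a rolling keyfile-to-X.509 upgrade: each mode fixes what a node sends while widening or narrowing what it accepts. A toy acceptance check under that reading (my interpretation of the comments, not code from the tree; ClusterAuthMode_undefined is omitted for brevity):

#include <iostream>

enum class ClusterAuthMode { keyFile, sendKeyFile, sendX509, x509 };

// What a node in each mode accepts from peers, per the comments above.
bool acceptsKeyFile(ClusterAuthMode mode) {
    return mode == ClusterAuthMode::keyFile || mode == ClusterAuthMode::sendKeyFile ||
        mode == ClusterAuthMode::sendX509;
}
bool acceptsX509(ClusterAuthMode mode) {
    return mode != ClusterAuthMode::keyFile;
}

int main() {
    std::cout << acceptsKeyFile(ClusterAuthMode::sendX509)      // 1: upgrade step still takes keyfiles
              << acceptsX509(ClusterAuthMode::keyFile) << "\n";  // 0: keyfile-only cluster
    return 0;
}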
diff --git a/src/mongo/db/server_options_helpers.h b/src/mongo/db/server_options_helpers.h
index da7098f23c4..a79dde98b39 100644
--- a/src/mongo/db/server_options_helpers.h
+++ b/src/mongo/db/server_options_helpers.h
@@ -43,18 +43,18 @@ class Environment;
namespace moe = mongo::optionenvironment;
/**
-* Handle custom validation of base options that can not currently be done by using
-* Constraints in the Environment. See the "validate" function in the Environment class for
-* more details.
-*/
+ * Handle custom validation of base options that can not currently be done by using
+ * Constraints in the Environment. See the "validate" function in the Environment class for
+ * more details.
+ */
Status validateBaseOptions(const moe::Environment& params);
/**
-* Canonicalize base options for the given environment.
-*
-* For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
-* merged into "net.wireObjectCheck".
-*/
+ * Canonicalize base options for the given environment.
+ *
+ * For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
+ * merged into "net.wireObjectCheck".
+ */
Status canonicalizeBaseOptions(moe::Environment* params);
/**
@@ -67,11 +67,11 @@ Status canonicalizeBaseOptions(moe::Environment* params);
Status setupBaseOptions(const std::vector<std::string>& args);
/**
-* Store the given parsed params in global server state.
-*
-* For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
-* parameter.
-*/
+ * Store the given parsed params in global server state.
+ *
+ * For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
+ * parameter.
+ */
Status storeBaseOptions(const moe::Environment& params);
} // namespace mongo
diff --git a/src/mongo/db/service_context_test_fixture.h b/src/mongo/db/service_context_test_fixture.h
index edbd5021816..e7508898c0f 100644
--- a/src/mongo/db/service_context_test_fixture.h
+++ b/src/mongo/db/service_context_test_fixture.h
@@ -39,9 +39,9 @@ namespace mongo {
class ScopedGlobalServiceContextForTest {
public:
/**
- * Returns a service context, which is only valid for this instance of the test.
- * Must not be called before setUp or after tearDown.
- */
+ * Returns a service context, which is only valid for this instance of the test.
+ * Must not be called before setUp or after tearDown.
+ */
ServiceContext* getServiceContext();
protected:
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 59e88663ef2..39343d97e39 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -123,9 +123,10 @@ void generateLegacyQueryErrorResponse(const AssertionException& exception,
curop->debug().errInfo = exception.toStatus();
log(LogComponent::kQuery) << "assertion " << exception.toString() << " ns:" << queryMessage.ns
- << " query:" << (queryMessage.query.valid(BSONVersion::kLatest)
- ? redact(queryMessage.query)
- : "query object is corrupt");
+ << " query:"
+ << (queryMessage.query.valid(BSONVersion::kLatest)
+ ? redact(queryMessage.query)
+ : "query object is corrupt");
if (queryMessage.ntoskip || queryMessage.ntoreturn) {
log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
<< " ntoreturn:" << queryMessage.ntoreturn;
@@ -977,8 +978,8 @@ DbResponse receivedCommands(OperationContext* opCtx,
// However, the complete command object will still be echoed to the client.
if (!(c = CommandHelpers::findCommand(request.getCommandName()))) {
globalCommandRegistry()->incrementUnknownCommands();
- std::string msg = str::stream() << "no such command: '" << request.getCommandName()
- << "'";
+ std::string msg = str::stream()
+ << "no such command: '" << request.getCommandName() << "'";
LOG(2) << msg;
uasserted(ErrorCodes::CommandNotFound, str::stream() << msg);
}
@@ -1014,12 +1015,10 @@ DbResponse receivedCommands(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << request.getCommandName()
- << "' operation on '"
- << request.getDatabase()
- << "' database via "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << request.getCommandName()
+ << "' operation on '" << request.getDatabase() << "' database via "
+ << "fire-and-forget command execution.");
}
return {}; // Don't reply.
}
@@ -1318,10 +1317,8 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (!opCtx->getClient()->isInDirectClient()) {
uassert(18663,
str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: "
- << nsString.ns()
- << ", op: "
- << networkOpToString(op),
+ << "versioned connections, ns: " << nsString.ns()
+ << ", op: " << networkOpToString(op),
!ShardedConnectionInfo::get(&c, false));
}
@@ -1349,12 +1346,10 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterLegacyUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << networkOpToString(op)
- << "' operation on '"
- << nsString
- << "' namespace via legacy "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << networkOpToString(op)
+ << "' operation on '" << nsString << "' namespace via legacy "
+ << "fire-and-forget command execution.");
}
}
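The receivedCommands() hunk reflows the unknown-command path: bump a counter, then fail with a message that echoes only the command name (the complete command object is echoed to the client elsewhere). A compact sketch with a map standing in for the command registry:

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> registry{{"ping", 1}, {"isMaster", 2}};
    int unknownCommands = 0;

    std::string name = "pingg";
    auto it = registry.find(name);
    if (it == registry.end()) {
        ++unknownCommands;
        // Only the name goes into the error text, mirroring the hunk above.
        std::string msg = "no such command: '" + name + "'";
        std::cout << msg << " (unknown count: " << unknownCommands << ")\n";
    }
    return 0;
}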
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index e52a99383f1..2f6145f0287 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/ops/write_ops.h"
@@ -92,8 +91,8 @@ void killSessionTokens(OperationContext* opCtx,
return;
getThreadPool(opCtx)->schedule(
- [ service = opCtx->getServiceContext(),
- sessionKillTokens = std::move(sessionKillTokens) ](auto status) mutable {
+ [service = opCtx->getServiceContext(),
+ sessionKillTokens = std::move(sessionKillTokens)](auto status) mutable {
invariant(status);
ThreadClient tc("Kill-Sessions", service);
@@ -185,11 +184,10 @@ void createTransactionTable(OperationContext* opCtx) {
return;
}
- uassertStatusOKWithContext(status,
- str::stream()
- << "Failed to create the "
- << NamespaceString::kSessionTransactionsTableNamespace.ns()
- << " collection");
+ uassertStatusOKWithContext(
+ status,
+ str::stream() << "Failed to create the "
+ << NamespaceString::kSessionTransactionsTableNamespace.ns() << " collection");
}
void abortInProgressTransactions(OperationContext* opCtx) {
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index ec99c76ffde..2ef67c1f884 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -124,12 +124,14 @@ TEST_F(SessionCatalogTest, ScanSession) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0], [&lsids](const ObservableSession& session) {
@@ -155,12 +157,14 @@ TEST_F(SessionCatalogTest, ScanSessionMarkForReapWhenSessionIsIdle) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0],
@@ -197,12 +201,14 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ScanSessions) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
// Scan over all Sessions.
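All three reflowed tests use the same idiom: run the session checkout on a fresh thread via stdx::async and immediately .get() the future, so each iteration runs on its own thread but the loop stays sequential. A minimal sketch with std::async (stdx::async is MongoDB's wrapper; the session types are omitted):

#include <future>
#include <iostream>

int main() {
    for (int lsid : {1, 2, 3}) {
        // Launch the body on a new thread and block until it finishes;
        // .get() also rethrows any exception the task raised, so a failing
        // checkout fails the test at this line.
        std::async(std::launch::async, [lsid] {
            std::cout << "checked out session " << lsid << '\n';
        }).get();
    }
}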
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index 3338979d8e2..bdfac76abff 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -46,18 +46,18 @@ class OperationContext;
class SessionsCollectionConfigServer : public SessionsCollectionSharded {
public:
/**
- * Ensures that the sessions collection has been set up for this cluster,
- * sharded, and with the proper indexes.
- *
- * This method may safely be called multiple times.
- *
- * If there are no shards in this cluster, this method will do nothing.
- */
+ * Ensures that the sessions collection has been set up for this cluster,
+ * sharded, and with the proper indexes.
+ *
+ * This method may safely be called multiple times.
+ *
+ * If there are no shards in this cluster, this method will do nothing.
+ */
Status setupSessionsCollection(OperationContext* opCtx) override;
/**
- * Checks if the sessions collection exists.
- */
+ * Checks if the sessions collection exists.
+ */
Status checkSessionsCollectionExists(OperationContext* opCtx) override;
private:
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 25d1b9e77f1..663e0f47c6f 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -188,24 +188,21 @@ public:
void openSource() {
_file.open(_fileName.c_str(), std::ios::in | std::ios::binary);
uassert(16814,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
_file.seekg(_fileStartOffset);
uassert(50979,
str::stream() << "error seeking starting offset of '" << _fileStartOffset
- << "' in file \""
- << _fileName
- << "\": "
- << myErrnoWithDescription(),
+ << "' in file \"" << _fileName << "\": " << myErrnoWithDescription(),
_file.good());
}
void closeSource() {
_file.close();
uassert(50969,
- str::stream() << "error closing file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error closing file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
!_file.fail());
// If the file iterator reads through all data objects, we can ensure non-corrupt data
@@ -328,8 +325,8 @@ private:
const std::streampos offset = _file.tellg();
uassert(51049,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
offset >= 0);
if (offset >= _fileEndOffset) {
@@ -340,8 +337,8 @@ private:
_file.read(reinterpret_cast<char*>(out), size);
uassert(16817,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
verify(_file.gcount() == static_cast<std::streamsize>(size));
}
@@ -605,8 +602,7 @@ private:
// need to be revisited.
uasserted(16819,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -893,8 +889,7 @@ private:
// need to be revisited.
uasserted(16820,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -970,8 +965,8 @@ SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts,
// limits.
_file.open(_fileName.c_str(), std::ios::binary | std::ios::app | std::ios::out);
uassert(16818,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription(),
_file.good());
// The file descriptor is positioned at the end of a file when opened in append mode, but
// _file.tellp() is not initialized on all systems to reflect this. Therefore, we must also pass
@@ -1044,8 +1039,8 @@ void SortedFileWriter<Key, Value>::spill() {
_file.write(outBuffer, std::abs(size));
} catch (const std::exception&) {
msgasserted(16821,
- str::stream() << "error writing to file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription());
+ str::stream() << "error writing to file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription());
}
_buffer.reset();
@@ -1057,8 +1052,7 @@ SortIteratorInterface<Key, Value>* SortedFileWriter<Key, Value>::done() {
std::streampos currentFileOffset = _file.tellp();
uassert(50980,
str::stream() << "error fetching current file descriptor offset in file \"" << _fileName
- << "\": "
- << sorter::myErrnoWithDescription(),
+ << "\": " << sorter::myErrnoWithDescription(),
currentFileOffset >= 0);
// In case nothing was written to disk, use _fileStartOffset because tellp() may not be
@@ -1106,4 +1100,4 @@ Sorter<Key, Value>* Sorter<Key, Value>::make(const SortOptions& opts,
return new sorter::TopKSorter<Key, Value, Comparator>(opts, comp, settings);
}
}
-}
+} // namespace mongo
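Nearly every sorter.cpp hunk repacks a `str::stream()` chain feeding a uassert message. The underlying pattern — build the message lazily with a stream, attach errno context, assert on the stream state — can be sketched with standard-library pieces (std::ostringstream and a thrown exception play the roles of str::stream() and uassert here; this is an analogy, not the upstream implementation):

#include <cerrno>
#include <cstring>
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <string>

// Rough analogue of myErrnoWithDescription(): the errno value plus its text.
std::string errnoWithDescription() {
    std::ostringstream ss;
    ss << errno << ": " << std::strerror(errno);
    return ss.str();
}

void openSource(std::ifstream& file, const std::string& fileName) {
    file.open(fileName, std::ios::in | std::ios::binary);
    if (!file.good()) {
        std::ostringstream msg;  // str::stream() plays this role upstream
        msg << "error opening file \"" << fileName << "\": " << errnoWithDescription();
        throw std::runtime_error(msg.str());  // uassert(16814, ...) upstream
    }
}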
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index 87d80a45f92..4dd6f4fc4c5 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -272,7 +272,7 @@ private:
std::streampos _fileStartOffset;
std::streampos _fileEndOffset;
};
-}
+} // namespace mongo
/**
* #include "mongo/db/sorter/sorter.cpp" and call this in a single translation
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 31a8b6c04b8..099df94ceac 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -100,9 +100,9 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
#endif
/*
- * We did not add the message to startupWarningsLog as the user can not
- * specify a sslCAFile parameter from the shell
- */
+ * We did not add the message to startupWarningsLog as the user can not
+ * specify a sslCAFile parameter from the shell
+ */
if (sslGlobalParams.sslMode.load() != SSLParams::SSLMode_disabled &&
#ifdef MONGO_CONFIG_SSL_CERTIFICATE_SELECTORS
sslGlobalParams.sslCertificateSelector.empty() &&
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index 470fc90388e..8cffdb2088a 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -111,9 +111,9 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
if (opMode.empty()) {
- return StatusWith<std::string>(
- ErrorCodes::BadValue,
- str::stream() << "invalid mode in " << filename << ": '" << line << "'");
+ return StatusWith<std::string>(ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '"
+ << line << "'");
}
// Check against acceptable values of opMode.
@@ -122,16 +122,12 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
ErrorCodes::BadValue,
str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
- << filename
- << ": '"
- << opMode
- << "''");
+ << filename << ": '" << opMode << "''");
}
} catch (const boost::filesystem::filesystem_error& err) {
return StatusWith<std::string>(ErrorCodes::UnknownError,
str::stream() << "Failed to probe \"" << err.path1().string()
- << "\": "
- << err.code().message());
+ << "\": " << err.code().message());
}
return StatusWith<std::string>(opMode);
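The THP hunks reformat errors raised by readTransparentHugePagesParameter(), which extracts the bracketed token from a sysfs line such as `always [madvise] never`. A minimal sketch of that parse, with the Status plumbing dropped and a hypothetical function name:

#include <iostream>
#include <string>

// Pull the selected mode out of a sysfs THP line, e.g. "always [madvise] never".
// Returns an empty string when the brackets are missing or empty -- the cases
// the reformatted BadValue branches above reject.
std::string parseThpMode(const std::string& line) {
    auto posBegin = line.find('[');
    auto posEnd = line.find(']');
    if (posBegin == std::string::npos || posEnd == std::string::npos || posEnd < posBegin)
        return {};
    return line.substr(posBegin + 1, posEnd - posBegin - 1);
}

int main() {
    std::cout << parseThpMode("always [madvise] never") << '\n';  // prints "madvise"
}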
diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp
index 4c5fa73bcd4..5e667340d1a 100644
--- a/src/mongo/db/stats/counters.cpp
+++ b/src/mongo/db/stats/counters.cpp
@@ -159,4 +159,4 @@ void NetworkCounter::append(BSONObjBuilder& b) {
OpCounters globalOpCounters;
OpCounters replOpCounters;
NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index d74402c8571..d7b8a0b88ec 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -139,4 +139,4 @@ private:
};
extern NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/fine_clock.h b/src/mongo/db/stats/fine_clock.h
index d01c2e74d4a..fe793ef16bc 100644
--- a/src/mongo/db/stats/fine_clock.h
+++ b/src/mongo/db/stats/fine_clock.h
@@ -69,6 +69,6 @@ public:
return diff;
}
};
-}
+} // namespace mongo
#endif // DB_STATS_FINE_CLOCK_HEADER
diff --git a/src/mongo/db/stats/timer_stats.cpp b/src/mongo/db/stats/timer_stats.cpp
index bb52e0226d7..35b1027fff1 100644
--- a/src/mongo/db/stats/timer_stats.cpp
+++ b/src/mongo/db/stats/timer_stats.cpp
@@ -69,4 +69,4 @@ BSONObj TimerStats::getReport() const {
b.appendNumber("totalMillis", t);
return b.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/timer_stats.h b/src/mongo/db/stats/timer_stats.h
index d09533bd537..029a238577c 100644
--- a/src/mongo/db/stats/timer_stats.h
+++ b/src/mongo/db/stats/timer_stats.h
@@ -88,4 +88,4 @@ private:
bool _recorded;
Timer _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index dcb9cde53bd..4c47df9cd7b 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -55,8 +55,7 @@ Ordering allAscending = Ordering::make(BSONObj());
auto const version = KeyString::Version::V1;
BSONObj const sample = BSON(""
<< "s"
- << ""
- << (int64_t)0);
+ << "" << (int64_t)0);
std::string createKey(StringData ident, int64_t recordId) {
KeyString::Builder ks(version, BSON("" << ident << "" << recordId), allAscending);
@@ -561,7 +560,7 @@ RecordStore::SizeAdjuster::~SizeAdjuster() {
int64_t deltaDataSize = _workingCopy->dataSize() - _origDataSize;
_rs->_numRecords.fetchAndAdd(deltaNumRecords);
_rs->_dataSize.fetchAndAdd(deltaDataSize);
- RecoveryUnit::get(_opCtx)->onRollback([ rs = _rs, deltaNumRecords, deltaDataSize ]() {
+ RecoveryUnit::get(_opCtx)->onRollback([rs = _rs, deltaNumRecords, deltaDataSize]() {
invariant(rs->_numRecords.load() >= deltaNumRecords);
rs->_numRecords.fetchAndSubtract(deltaNumRecords);
rs->_dataSize.fetchAndSubtract(deltaDataSize);
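The SizeAdjuster hunk above only tightens the lambda capture list, but the captured-delta rollback handler it contains is a reusable pattern: record how much a counter changed, then register a closure that subtracts exactly that amount if the unit of work rolls back. A sketch with a hypothetical MiniRecoveryUnit standing in for MongoDB's RecoveryUnit:

#include <atomic>
#include <functional>
#include <vector>

// Hypothetical stand-in for a rollback-handler registry.
struct MiniRecoveryUnit {
    std::vector<std::function<void()>> handlers;
    void onRollback(std::function<void()> fn) { handlers.push_back(std::move(fn)); }
    void rollback() {
        for (auto& fn : handlers) fn();
        handlers.clear();
    }
};

int main() {
    std::atomic<long long> numRecords{10};
    long long delta = 3;
    numRecords.fetch_add(delta);

    MiniRecoveryUnit ru;
    // Capture the delta by value so the handler undoes exactly this change.
    ru.onRollback([&numRecords, delta] { numRecords.fetch_sub(delta); });

    ru.rollback();  // numRecords is back to 10
}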
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
index 6a0ccceba63..03aa3330b60 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
@@ -59,12 +59,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
BSON(IndexDescriptor::kPartialFilterExprFieldName.toString() << BSON(""
diff --git a/src/mongo/db/storage/biggie/store.h b/src/mongo/db/storage/biggie/store.h
index 6c0c883f108..a09b5b49b63 100644
--- a/src/mongo/db/storage/biggie/store.h
+++ b/src/mongo/db/storage/biggie/store.h
@@ -153,10 +153,10 @@ public:
: _root(root), _current(current) {}
/**
- * This function traverses the tree to find the next left-most node with data. Modifies
- * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
- * node itself then 'visit' the child subtrees from left to right).
- */
+ * This function traverses the tree to find the next left-most node with data. Modifies
+ * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
+ * node itself then 'visit' the child subtrees from left to right).
+ */
void _findNext() {
// If 'current' is a nullptr there is no next node to go to.
if (_current == nullptr)
diff --git a/src/mongo/db/storage/biggie/store_test.cpp b/src/mongo/db/storage/biggie/store_test.cpp
index cc4c8a5d7ca..e75a81bc7c7 100644
--- a/src/mongo/db/storage/biggie/store_test.cpp
+++ b/src/mongo/db/storage/biggie/store_test.cpp
@@ -2492,5 +2492,5 @@ TEST_F(RadixStoreTest, LowerBoundEndpoint) {
ASSERT_TRUE(it == thisStore.end());
}
-} // biggie namespace
-} // mongo namespace
+} // namespace biggie
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index b6485febf42..a90848175a0 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -254,4 +254,4 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
prefix = KVPrefix::fromBSONElement(obj["prefix"]);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 15405b1942d..4f71435937c 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -110,4 +110,4 @@ public:
KVPrefix prefix = KVPrefix::kNotPrefixed;
};
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 44b11310544..cced6f61f5d 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -63,4 +63,4 @@ public:
*/
virtual void notifyCappedWaitersIfNeeded() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index d0645b897b2..ad4f95460b7 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -156,4 +156,4 @@ private:
int _cachePressureForTest;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 208566a2385..c8b849206c1 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -804,7 +804,7 @@ StatusWith<std::unique_ptr<RecordStore>> DurableCatalogImpl::createCollection(
}
CollectionUUID uuid = options.uuid.get();
- opCtx->recoveryUnit()->onRollback([ opCtx, catalog = this, nss, ident, uuid ]() {
+ opCtx->recoveryUnit()->onRollback([opCtx, catalog = this, nss, ident, uuid]() {
// Intentionally ignoring failure
catalog->_engine->getEngine()->dropIdent(opCtx, ident).ignore();
});
@@ -871,7 +871,7 @@ Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, const Namespa
// This will notify the storageEngine to drop the collection only on WUOW::commit().
opCtx->recoveryUnit()->onCommit(
- [ opCtx, catalog = this, nss, uuid, ident ](boost::optional<Timestamp> commitTimestamp) {
+ [opCtx, catalog = this, nss, uuid, ident](boost::optional<Timestamp> commitTimestamp) {
StorageEngineInterface* engine = catalog->_engine;
auto storageEngine = engine->getStorageEngine();
if (storageEngine->supportsPendingDrops() && commitTimestamp) {
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 209af0253fa..1f689ddd607 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -124,4 +124,4 @@ std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext*
}
return all;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index 3a76d268efb..a083f9f3a4b 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -133,4 +133,4 @@ private:
// Notified when we write as everything is considered "journalled" since repl depends on it.
JournalListener* _journalListener = &NoOpJournalListener::instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index dc0d2fa1ed0..dc41fab864c 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -389,10 +389,8 @@ StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(Wit
return StatusWith<RecordId>(ErrorCodes::BadValue,
str::stream() << "attempted out-of-order oplog insert of "
- << status.getValue()
- << " (oplog last insert was "
- << _data->records.rbegin()->first
- << " )");
+ << status.getValue() << " (oplog last insert was "
+ << _data->records.rbegin()->first << " )");
}
return status;
}
diff --git a/src/mongo/db/storage/journal_listener.h b/src/mongo/db/storage/journal_listener.h
index 275b8ad05d7..88597adb2bc 100644
--- a/src/mongo/db/storage/journal_listener.h
+++ b/src/mongo/db/storage/journal_listener.h
@@ -70,4 +70,4 @@ public:
// As this has no state, it is de facto const and can be safely shared freely.
static NoOpJournalListener instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 5c4e269e6ed..98ca9082f41 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -1315,9 +1315,9 @@ void toBsonValue(uint8_t ctype,
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
auto type = typeBits->readNumeric();
@@ -1430,7 +1430,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
const uint8_t originalType = typeBits->readNumeric();
@@ -1561,7 +1561,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
@@ -1795,9 +1795,9 @@ void filterKeyFromKeyString(uint8_t ctype, BufReader* reader, bool inverted, Ver
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
break;
@@ -1836,7 +1836,7 @@ void filterKeyFromKeyString(uint8_t ctype, BufReader* reader, bool inverted, Ver
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
uint64_t encoded = readType<uint64_t>(reader, inverted);
@@ -1898,7 +1898,7 @@ void filterKeyFromKeyString(uint8_t ctype, BufReader* reader, bool inverted, Ver
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
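The key_string.cpp hunks only re-indent the `// fallthrough` comments that mark intentional case fall-through in the decoders. As an aside, C++17 lets the same intent be stated to the compiler with [[fallthrough]]; a minimal sketch with invented case labels, not the upstream decoder:

#include <iostream>

enum class CType { kNegativeInt, kPositiveInt };

int decode(CType ctype, bool inverted) {
    bool isNegative = false;
    switch (ctype) {
        case CType::kNegativeInt:
            inverted = !inverted;
            isNegative = true;
            [[fallthrough]];  // format is the same as positive, but inverted
        case CType::kPositiveInt: {
            int magnitude = inverted ? 1 : 2;  // stand-in for the real read
            return isNegative ? -magnitude : magnitude;
        }
    }
    return 0;
}

int main() {
    std::cout << decode(CType::kNegativeInt, false) << '\n';  // prints -1
}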
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index 0c7bf0b9f5e..7d860b8da6b 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -664,7 +664,6 @@ TEST_F(KeyStringBuilderTest, LotsOfNumbers3) {
for (double k = 0; k < 8; k++) {
futures.push_back(stdx::async(stdx::launch::async, [k, this] {
-
for (double i = -1100; i < 1100; i++) {
for (double j = 0; j < 52; j++) {
const auto V1 = KeyString::Version::V1;
@@ -886,10 +885,8 @@ const std::vector<BSONObj>& getInterestingElements(KeyString::Version version) {
// Something with exceptional typeBits for Decimal
elements.push_back(
BSON("" << BSON_ARRAY("" << BSONSymbol("") << Decimal128::kNegativeInfinity
- << Decimal128::kPositiveInfinity
- << Decimal128::kPositiveNaN
- << Decimal128("0.0000000")
- << Decimal128("-0E1000"))));
+ << Decimal128::kPositiveInfinity << Decimal128::kPositiveNaN
+ << Decimal128("0.0000000") << Decimal128("-0E1000"))));
}
//
diff --git a/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp b/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp
index 7208a589c54..8ec66a5c14b 100644
--- a/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp
+++ b/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/storage/key_string.h"
#include "mongo/bson/bson_validate.h"
+#include "mongo/db/storage/key_string.h"
const mongo::Ordering kAllAscending = mongo::Ordering::make(mongo::BSONObj());
const mongo::Ordering kOneDescending = mongo::Ordering::make(BSON("a" << -1));
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index c125d8a8d22..f76ad1830eb 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -129,8 +129,7 @@ public:
bool match = (expected == actual);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expected) << ", "
- << "Actual: "
- << dumpMultikeyPaths(actual));
+ << "Actual: " << dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index d8b5772cd3b..523a487feed 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -438,4 +438,4 @@ protected:
*/
const int64_t kDefaultCappedSizeBytes = 4096;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 17ad60721d7..7b9bfb7a5f4 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -168,9 +168,7 @@ TEST(KVEngineTestHarness, SimpleSorted1) {
IndexDescriptor desc(collection.get(),
"",
BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
@@ -706,10 +704,7 @@ DEATH_TEST_F(DurableCatalogImplTest, TerminateOnNonNumericIndexVersion, "Fatal A
"",
BSON("v"
<< "1"
- << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << "ns" << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
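The index-spec hunks above show how the formatter now packs `BSON(...)` chains onto fewer lines. As a reminder of what those chains build, a short sketch using the BSON macro as it appears in the diff (the include path is assumed; "v" is hard-coded to 2 here rather than kLatestIndexVersion):

#include <string>

#include "mongo/bson/bsonobjbuilder.h"  // assumed provider of the BSON(...) macro

namespace mongo {
BSONObj makeIndexSpec(const std::string& ns) {
    // Equivalent JSON: {v: 2, ns: <ns>, key: {a: 1}}
    return BSON("v" << 2 << "ns" << ns << "key" << BSON("a" << 1));
}
}  // namespace mongo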
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 078446493bc..6b88dc22c3b 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -70,4 +70,4 @@ std::string KVPrefix::toString() const {
stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index ee35720cbe5..6a785dc19db 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -100,4 +100,4 @@ private:
inline std::ostream& operator<<(std::ostream& s, const KVPrefix& prefix) {
return (s << prefix.toString());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/temporary_kv_record_store.h b/src/mongo/db/storage/kv/temporary_kv_record_store.h
index a992ca69cd4..f4b7c6033bd 100644
--- a/src/mongo/db/storage/kv/temporary_kv_record_store.h
+++ b/src/mongo/db/storage/kv/temporary_kv_record_store.h
@@ -53,8 +53,7 @@ public:
// Move constructor.
TemporaryKVRecordStore(TemporaryKVRecordStore&& other) noexcept
- : TemporaryRecordStore(std::move(other._rs)),
- _kvEngine(other._kvEngine) {}
+ : TemporaryRecordStore(std::move(other._rs)), _kvEngine(other._kvEngine) {}
~TemporaryKVRecordStore();
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 605117e6983..08586e0ece8 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -102,8 +102,8 @@ public:
private:
/**
- * Gets the front element from _sessions and then pops it off the queue.
- */
+ * Gets the front element from _sessions and then pops it off the queue.
+ */
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 21e70b95143..ad6747b2ad1 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -602,4 +602,4 @@ public:
const RecordData& recordData,
size_t* dataSize) = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 9f4214aef1f..1fb200c5105 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
TEST(RecordStoreTestHarness, Simple1) {
const auto harnessHelper(newRecordStoreHarnessHelper());
diff --git a/src/mongo/db/storage/record_store_test_randomiter.cpp b/src/mongo/db/storage/record_store_test_randomiter.cpp
index c9c9757d827..dda51057e6d 100644
--- a/src/mongo/db/storage/record_store_test_randomiter.cpp
+++ b/src/mongo/db/storage/record_store_test_randomiter.cpp
@@ -38,10 +38,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create a random iterator for empty record store.
TEST(RecordStoreTestHarness, GetRandomIteratorEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 38a5f356aad..c50ebba023c 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -42,9 +42,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert multiple records and iterate through them in the forward direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp
index 38649fa89da..a10adfa75fa 100644
--- a/src/mongo/db/storage/record_store_test_recordstore.cpp
+++ b/src/mongo/db/storage/record_store_test_recordstore.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Verify that the name of the record store is not NULL and nonempty.
TEST(RecordStoreTestHarness, RecordStoreName) {
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index 74aa0237cbc..cad095d0286 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -40,10 +40,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create an iterator for repairing an empty record store.
TEST(RecordStoreTestHarness, GetIteratorForRepairEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index cba9e555c16..57b126b37a5 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that a nonempty collection maybe takes up some space on disk.
TEST(RecordStoreTestHarness, StorageSizeNonEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index edafde0683e..ead23584b87 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling touch() on an empty collection returns an OK status.
TEST(RecordStoreTestHarness, TouchEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index d05e3e9a117..a37c9a6681c 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling truncate() on an already empty collection returns an OK status.
TEST(RecordStoreTestHarness, TruncateEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index d6f16586cde..b07d215cfa0 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert a record and try to update it.
TEST(RecordStoreTestHarness, UpdateRecord) {
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index 298685c7285..9753e7d76b6 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -40,8 +40,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Insert a record and try to perform an in-place update on it.
TEST(RecordStoreTestHarness, UpdateWithDamages) {
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index 16ad9b482e9..7012f671c37 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -45,8 +45,8 @@
using std::ios_base;
using std::ofstream;
-using std::stringstream;
using std::string;
+using std::stringstream;
namespace mongo {
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
index 57045aae502..d169e4dada0 100644
--- a/src/mongo/db/storage/snapshot.h
+++ b/src/mongo/db/storage/snapshot.h
@@ -93,4 +93,4 @@ private:
SnapshotId _id;
T _value;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index 895fc5560d9..20aba3337b3 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -40,12 +40,15 @@ namespace {
void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -80,12 +83,15 @@ TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) {
void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -220,12 +226,14 @@ TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) {
void testSetEndPosition_Restore_Forward(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(key3, false); // Should never see key3 or key4.
@@ -241,7 +249,8 @@ void testSetEndPosition_Restore_Forward(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -257,12 +266,14 @@ TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) {
void testSetEndPosition_Restore_Reverse(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(key2, false); // Should never see key1 or key2.
@@ -278,7 +289,8 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -302,7 +314,8 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -337,7 +350,8 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -370,12 +384,13 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(BSONObj(), inclusive);
@@ -401,12 +416,13 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(BSONObj(), inclusive);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index 88a43ed0005..30d207d5031 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -300,7 +300,8 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -335,7 +336,8 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -414,12 +416,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
@@ -497,12 +500,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key0, loc1}, {key1, loc1}, {key2, loc2},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key0, loc1},
+ {key1, loc1},
+ {key2, loc2},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -580,12 +584,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev
TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique=*/false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique=*/false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index fa608652d8f..4a0584e0559 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -40,12 +40,13 @@ namespace {
void testSeekExact_Hit(bool unique, bool forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -111,7 +112,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -131,7 +135,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 1b0bf3e6216..24e3781fba1 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -151,8 +151,8 @@ public:
};
/**
- * The destructor should only be called if we are tearing down but not exiting the process.
- */
+ * The destructor should only be called if we are tearing down but not exiting the process.
+ */
virtual ~StorageEngine() {}
/**
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 57dcb407253..9e003e4fa5b 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -93,8 +93,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (status.code() == ErrorCodes::DataModifiedByRepair) {
warning() << "Catalog data modified by repair: " << status.reason();
- repairObserver->onModification(str::stream() << "DurableCatalog repaired: "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "DurableCatalog repaired: " << status.reason());
} else {
fassertNoTrace(50926, status);
}
@@ -210,8 +210,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (_options.forRepair) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << nss << " dropped: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << nss
+ << " dropped: " << status.reason());
}
wuow.commit();
continue;
@@ -299,8 +299,8 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx,
}
if (dataModified) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << collectionName << " recovered: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << collectionName
+ << " recovered: " << status.reason());
}
wuow.commit();
return Status::OK();
@@ -398,8 +398,7 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
if (engineIdents.find(identForColl) == engineIdents.end()) {
return {ErrorCodes::UnrecoverableRollbackError,
str::stream() << "Expected collection does not exist. Collection: " << coll
- << " Ident: "
- << identForColl};
+ << " Ident: " << identForColl};
}
}
}
@@ -495,8 +494,8 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
for (auto&& indexName : indexesToDrop) {
invariant(metaData.eraseIndex(indexName),
- str::stream() << "Index is missing. Collection: " << coll << " Index: "
- << indexName);
+ str::stream()
+ << "Index is missing. Collection: " << coll << " Index: " << indexName);
}
if (indexesToDrop.size() > 0) {
WriteUnitOfWork wuow(opCtx);
@@ -684,8 +683,8 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx, const Names
}
if (dataModified) {
- repairObserver->onModification(str::stream() << "Collection " << nss << ": "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "Collection " << nss << ": " << status.reason());
}
// After repairing, re-initialize the collection with a valid RecordStore.
@@ -825,8 +824,8 @@ void StorageEngineImpl::_dumpCatalog(OperationContext* opCtx) {
while (rec) {
// This should only be called by a parent that's done an appropriate `shouldLog` check. Do
// not duplicate the log level policy.
- LOG_FOR_RECOVERY(kCatalogLogLevel) << "\tId: " << rec->id
- << " Value: " << rec->data.toBson();
+ LOG_FOR_RECOVERY(kCatalogLogLevel)
+ << "\tId: " << rec->id << " Value: " << rec->data.toBson();
auto valueBson = rec->data.toBson();
if (valueBson.hasField("md")) {
std::string ns = valueBson.getField("md").Obj().getField("ns").String();
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index 5377f91dfb2..281a32fdc53 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -106,14 +106,12 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
getFactoryForStorageEngine(service, storageGlobalParams.engine);
if (factory) {
uassert(28662,
- str::stream() << "Cannot start server. Detected data files in " << dbpath
- << " created by"
- << " the '"
- << *existingStorageEngine
- << "' storage engine, but the"
- << " specified storage engine was '"
- << factory->getCanonicalName()
- << "'.",
+ str::stream()
+ << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '" << *existingStorageEngine << "' storage engine, but the"
+ << " specified storage engine was '" << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
}
} else {
@@ -156,8 +154,7 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
uassert(34368,
str::stream()
<< "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine
- << ", does not support read-only operation",
+ << storageGlobalParams.engine << ", does not support read-only operation",
factory->supportsReadOnly());
}
@@ -223,9 +220,7 @@ void createLockFile(ServiceContext* service) {
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath
- << ": "
- << ex.what(),
+ << storageGlobalParams.dbpath << ": " << ex.what(),
false);
}
const bool wasUnclean = lockFile->createdByUncleanShutdown();
diff --git a/src/mongo/db/storage/storage_engine_interface.h b/src/mongo/db/storage/storage_engine_interface.h
index db7201c1492..77703c8aa6f 100644
--- a/src/mongo/db/storage/storage_engine_interface.h
+++ b/src/mongo/db/storage/storage_engine_interface.h
@@ -46,4 +46,4 @@ public:
StringData ident) = 0;
virtual DurableCatalog* getCatalog() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index b39b0503547..c0398eddec3 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -67,8 +67,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(40387,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -85,8 +85,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(40388,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -136,8 +136,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
// Use file permissions 644
@@ -153,13 +152,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
int ret = ::flock(lockFile, LOCK_EX | LOCK_NB);
if (ret != 0) {
@@ -167,11 +164,9 @@ Status StorageEngineLockFile::open() {
::close(lockFile);
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to lock the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Another mongod instance is already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_fd = lockFile;
return Status::OK();
@@ -197,9 +192,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string to file (ftruncate failed): "
- << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << _filespec << ' ' << errnoWithDescription(errorcode));
}
int bytesWritten = ::write(_lockFileHandle->_fd, str.rawData(), str.size());
@@ -207,8 +200,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
@@ -220,9 +212,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << str
- << " to file (fsync failed): "
- << _filespec
- << ' '
+ << " to file (fsync failed): " << _filespec << ' '
<< errnoWithDescription(errorcode));
}
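The lock-file hunks reflow long Status messages; the mechanism they report on — a non-blocking, exclusive flock() on the lock file — is compact enough to sketch. POSIX-only, error handling reduced to a sentinel fd, function name invented:

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

// Try to take an exclusive, non-blocking advisory lock on `path`.
// Returns the open fd on success, -1 if another process holds the lock --
// roughly the condition StorageEngineLockFile::open() reports as DBPathInUse.
int tryLockFile(const char* path) {
    int fd = ::open(path, O_RDWR | O_CREAT, 0644);  // file permissions 644, as upstream
    if (fd < 0)
        return -1;
    if (::flock(fd, LOCK_EX | LOCK_NB) != 0) {
        ::close(fd);
        return -1;  // lock held elsewhere: another instance is running
    }
    return fd;  // keep the fd open for the lifetime of the process
}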
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index 153040ef874..df4967e2d41 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -46,8 +46,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index 2be6f11bb03..4055318d1d8 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -108,8 +108,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
HANDLE lockFileHandle = CreateFileW(toNativeString(_filespec.c_str()).c_str(),
@@ -130,13 +129,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_handle = lockFileHandle;
return Status::OK();
@@ -171,8 +168,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = GetLastError();
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 62fecc4c102..ecf401f3ee9 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -142,13 +142,13 @@ Status StorageEngineMetadata::read() {
boost::uintmax_t fileSize = boost::filesystem::file_size(metadataPath);
if (fileSize == 0) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Metadata file " << metadataPath.string()
- << " cannot be empty.");
+ str::stream()
+ << "Metadata file " << metadataPath.string() << " cannot be empty.");
}
if (fileSize == static_cast<boost::uintmax_t>(-1)) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Unable to determine size of metadata file "
- << metadataPath.string());
+ str::stream()
+ << "Unable to determine size of metadata file " << metadataPath.string());
}
std::vector<char> buffer(fileSize);
@@ -156,23 +156,21 @@ Status StorageEngineMetadata::read() {
std::ifstream ifs(metadataPath.c_str(), std::ios_base::in | std::ios_base::binary);
if (!ifs) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed to read metadata from "
- << metadataPath.string());
+ str::stream()
+ << "Failed to read metadata from " << metadataPath.string());
}
// Read BSON from file
ifs.read(&buffer[0], buffer.size());
if (!ifs) {
return Status(ErrorCodes::FileStreamFailed,
- str::stream() << "Unable to read BSON data from "
- << metadataPath.string());
+ str::stream()
+ << "Unable to read BSON data from " << metadataPath.string());
}
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unexpected error reading BSON data from "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataPath.string() << ": " << ex.what());
}
ConstDataRange cdr(&buffer[0], buffer.size());
@@ -232,8 +230,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -250,8 +248,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(13651,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -270,9 +268,9 @@ Status StorageEngineMetadata::write() const {
{
std::ofstream ofs(metadataTempPath.c_str(), std::ios_base::out | std::ios_base::binary);
if (!ofs) {
- return Status(
- ErrorCodes::FileNotOpen,
- str::stream() << "Failed to write metadata to " << metadataTempPath.string() << ": "
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream()
+ << "Failed to write metadata to " << metadataTempPath.string() << ": "
<< errnoWithDescription());
}
@@ -281,10 +279,9 @@ Status StorageEngineMetadata::write() const {
ofs.write(obj.objdata(), obj.objsize());
if (!ofs) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to write BSON data to "
- << metadataTempPath.string()
- << ": "
- << errnoWithDescription());
+ str::stream()
+ << "Failed to write BSON data to " << metadataTempPath.string()
+ << ": " << errnoWithDescription());
}
}
@@ -304,11 +301,8 @@ Status StorageEngineMetadata::write() const {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileRenameFailed,
str::stream() << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string()
- << " to "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataTempPath.string() << " to " << metadataPath.string()
+ << ": " << ex.what());
}
return Status::OK();
@@ -324,21 +318,16 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
ErrorCodes::InvalidOptions,
str::stream()
<< "Requested option conflicts with the current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is implicitly set to "
- << (*defaultValue ? "true" : "false")
- << " and cannot be changed");
+ << (*defaultValue ? "true" : "false") << " and cannot be changed");
}
return Status::OK();
}
if (!element.isBoolean()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type())
- << " instead: "
- << element);
+ << typeName(element.type()) << " instead: " << element);
}
if (element.boolean() == expectedValue) {
return Status::OK();
@@ -346,12 +335,9 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
return Status(
ErrorCodes::InvalidOptions,
str::stream() << "Requested option conflicts with current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
- << (element.boolean() ? "true" : "false")
- << " and cannot be changed");
+ << (element.boolean() ? "true" : "false") << " and cannot be changed");
}
} // namespace mongo
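Aside: the hunks above reflow MongoDB's internal str::stream() chains so each error message is built in one expression before being handed to a Status. As a rough standalone analogue (a sketch, not code from this patch; std::ostringstream stands in for the internal helper, and the struct and path below are illustrative only):

#include <iostream>
#include <sstream>
#include <string>

struct Status {
    int code;
    std::string reason;
};

Status makeSizeError(const std::string& path) {
    std::ostringstream msg;
    // Build the whole message in one place, then hand it to the Status,
    // mirroring the `str::stream() << ...` chains in the diff.
    msg << "Unable to determine size of metadata file " << path;
    return Status{1, msg.str()};
}

int main() {
    std::cout << makeSizeError("/tmp/storage.bson").reason << '\n';  // example path
}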
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 0e1e59dc5c2..57e386644d6 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -44,8 +44,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_file_util.cpp b/src/mongo/db/storage/storage_file_util.cpp
index c267b292ee1..dd47a85642d 100644
--- a/src/mongo/db/storage/storage_file_util.cpp
+++ b/src/mongo/db/storage/storage_file_util.cpp
@@ -72,8 +72,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY);
if (fd < 0) {
return {ErrorCodes::FileOpenFailed,
- str::stream() << "Failed to open directory " << dir.string() << " for flushing: "
- << errnoWithDescription()};
+ str::stream() << "Failed to open directory " << dir.string()
+ << " for flushing: " << errnoWithDescription()};
}
if (fsync(fd) != 0) {
int e = errno;
@@ -82,8 +82,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
return {ErrorCodes::OperationFailed,
- str::stream() << "Failed to fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e)};
+ str::stream() << "Failed to fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e)};
}
}
close(fd);
@@ -102,9 +102,7 @@ Status fsyncRename(const boost::filesystem::path& source, const boost::filesyste
if (ec) {
return {ErrorCodes::FileRenameFailed,
str::stream() << "Error renaming data file from " << source.string() << " to "
- << dest.string()
- << ": "
- << ec.message()};
+ << dest.string() << ": " << ec.message()};
}
auto status = fsyncFile(dest);
if (!status.isOK()) {
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index fb1d025289d..1da860e1e0d 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -63,17 +63,12 @@ public:
<< (oldestRequiredTimestampForCrashRecovery
? *oldestRequiredTimestampForCrashRecovery
: Timestamp())
- << "supportsPendingDrops"
- << engine->supportsPendingDrops()
+ << "supportsPendingDrops" << engine->supportsPendingDrops()
<< "dropPendingIdents"
<< static_cast<long long>(engine->getDropPendingIdents().size())
- << "supportsSnapshotReadConcern"
- << engine->supportsReadConcernSnapshot()
- << "readOnly"
- << storageGlobalParams.readOnly
- << "persistent"
- << !engine->isEphemeral()
- << "backupCursorOpen"
+ << "supportsSnapshotReadConcern" << engine->supportsReadConcernSnapshot()
+ << "readOnly" << storageGlobalParams.readOnly << "persistent"
+ << !engine->isEphemeral() << "backupCursorOpen"
<< backupCursorHooks->isBackupCursorOpen());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
index abadc810e6a..2c321725173 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
@@ -77,4 +77,4 @@ protected:
WT_CURSOR* _cursor = nullptr; // Owned
};
-}
+} // namespace mongo
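Aside: many files in this commit gain a namespace-closing comment like the one above, consistent with clang-format's FixNamespaceComments option. A minimal sketch of the convention (illustrative, not from this patch):

namespace mongo {

class WiredTigerCursor;  // declarations elided

}  // namespace mongo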
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index caf4bb11cbd..eb9326f0625 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -144,8 +144,7 @@ StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& option
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -337,10 +336,10 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx,
warning() << msg;
fullResults->warnings.push_back(msg);
} else if (err) {
- std::string msg = str::stream() << "verify() returned " << wiredtiger_strerror(err)
- << ". "
- << "This indicates structural damage. "
- << "Not examining individual index entries.";
+ std::string msg = str::stream()
+ << "verify() returned " << wiredtiger_strerror(err) << ". "
+ << "This indicates structural damage. "
+ << "Not examining individual index entries.";
error() << msg;
fullResults->errors.push_back(msg);
fullResults->valid = false;
@@ -538,12 +537,11 @@ KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx,
ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion);
if (!version.isOK()) {
Status versionStatus = version.getStatus();
- Status indexVersionStatus(
- ErrorCodes::UnsupportedFormat,
- str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName()
- << ", ns: "
- << desc->parentNS()
- << "} - version either too old or too new for this mongod.");
+ Status indexVersionStatus(ErrorCodes::UnsupportedFormat,
+ str::stream()
+ << versionStatus.reason() << " Index: {name: "
+ << desc->indexName() << ", ns: " << desc->parentNS()
+ << "} - version either too old or too new for this mongod.");
fassertFailedWithStatusNoTrace(28579, indexVersionStatus);
}
_dataFormatVersion = version.getValue();
@@ -553,14 +551,13 @@ KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx,
_dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2
? Status::OK()
: Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Index: {name: " << desc->indexName() << ", ns: "
- << desc->parentNS()
- << "} has incompatible format version: "
- << _dataFormatVersion
- << ". MongoDB 4.2 onwards, WT secondary unique indexes use "
- "either format version 11 or 12. See "
- "https://dochub.mongodb.org/core/upgrade-4.2-procedures for "
- "detailed instructions on upgrading the index format.");
+ str::stream()
+ << "Index: {name: " << desc->indexName() << ", ns: " << desc->parentNS()
+ << "} has incompatible format version: " << _dataFormatVersion
+ << ". MongoDB 4.2 onwards, WT secondary unique indexes use "
+ "either format version 11 or 12. See "
+ "https://dochub.mongodb.org/core/upgrade-4.2-procedures for "
+ "detailed instructions on upgrading the index format.");
fassertNoTrace(31179, versionStatus);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 30774f9032e..df17969fc5a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -301,4 +301,4 @@ public:
bool dupsAllowed) override;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index b84d3e812b1..f53623761d1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -85,15 +85,9 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode)
- << " but got "
- << status.toString()
- << " instead. metadataOptions: "
- << metadataOptions
- << "; directoryPerDB: "
- << directoryPerDB
- << "; directoryForIndexes: "
- << directoryForIndexes);
+ << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
+ << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
+ << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 026e7d59c74..09566bd3465 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1719,8 +1719,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationCont
str::stream()
<< "No stable timestamp available to recover to. Initial data timestamp: "
<< initialDataTS.toString()
- << ", Stable timestamp: "
- << stableTS.toString());
+ << ", Stable timestamp: " << stableTS.toString());
}
LOG_FOR_ROLLBACK(2) << "WiredTiger::RecoverToStableTimestamp syncing size storer to disk.";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index c1502094ec5..6be66bd9128 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -455,4 +455,4 @@ private:
// timestamp. Provided by replication layer because WT does not persist timestamps.
AtomicWord<std::uint64_t> _initialDataTimestamp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
index 6e4cbf157ab..90292778505 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
@@ -50,4 +50,4 @@ MONGO_STARTUP_OPTIONS_STORE(WiredTigerOptions)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 4901b30d631..d2a25c55a4f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -56,9 +56,8 @@ Status applyMaxCacheOverflowSizeGBParameter(WiredTigerMaxCacheOverflowSizeGBPara
int ret = param._data.second->reconfigure(
fmt::format("cache_overflow=(file_max={}M)", valueMB).c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
@@ -91,9 +90,8 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
invariant(_data.second);
int ret = _data.second->reconfigure(str.c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
index df39f655421..4684e13e264 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
@@ -76,12 +76,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
index 549b160913b..5272bc0ca27 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
@@ -64,9 +64,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class PrefixedWiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 682e48e840c..a904a507827 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -72,8 +72,8 @@
namespace mongo {
using namespace fmt::literals;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
@@ -481,8 +481,7 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -645,10 +644,11 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
_engineName(params.engineName),
_isCapped(params.isCapped),
_isEphemeral(params.isEphemeral),
- _isLogged(!isTemp() && WiredTigerUtil::useTableLogging(
- NamespaceString(ns()),
- getGlobalReplSettings().usingReplSets() ||
- repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
+ _isLogged(!isTemp() &&
+ WiredTigerUtil::useTableLogging(
+ NamespaceString(ns()),
+ getGlobalReplSettings().usingReplSets() ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
_isOplog(NamespaceString::oplog(params.ns)),
_cappedMaxSize(params.cappedMaxSize),
_cappedMaxSizeSlack(std::min(params.cappedMaxSize / 10, int64_t(16 * 1024 * 1024))),
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 81c59999fc8..ddd90ba990a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -525,4 +525,4 @@ MONGO_FAIL_POINT_DECLARE(WTWriteConflictExceptionForReads);
// will not be considered durable until deactivated. It is unspecified whether writes that commit
// before activation will become visible while active.
MONGO_FAIL_POINT_DECLARE(WTPausePrimaryOplogDurabilityLoop);
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 2b59fc2c762..36094cf945a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -56,9 +56,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
BSONObj spec = fromjson("{}");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 40b916222ff..4a096b0cbba 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -370,8 +370,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) {
str::stream() << "Cannot have both a _lastTimestampSet and a "
"_commitTimestamp. _lastTimestampSet: "
<< _lastTimestampSet->toString()
- << ". _commitTimestamp: "
- << _commitTimestamp.toString());
+ << ". _commitTimestamp: " << _commitTimestamp.toString());
// We reset the _lastTimestampSet between transactions. Since it is legal for one
// transaction on a RecoveryUnit to call setTimestamp() and another to call
@@ -608,8 +607,7 @@ Status WiredTigerRecoveryUnit::setTimestamp(Timestamp timestamp) {
invariant(_prepareTimestamp.isNull());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set WUOW timestamp to "
- << timestamp.toString());
+ << " and trying to set WUOW timestamp to " << timestamp.toString());
invariant(_readAtTimestamp.isNull() || timestamp >= _readAtTimestamp,
str::stream() << "future commit timestamp " << timestamp.toString()
<< " cannot be older than read timestamp "
@@ -636,12 +634,10 @@ void WiredTigerRecoveryUnit::setCommitTimestamp(Timestamp timestamp) {
invariant(!_inUnitOfWork() || !_prepareTimestamp.isNull(), toString(_getState()));
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set it to "
- << timestamp.toString());
+ << " and trying to set it to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set commit timestamp to "
- << timestamp.toString());
+ << " and trying to set commit timestamp to " << timestamp.toString());
invariant(!_isTimestamped);
_commitTimestamp = timestamp;
@@ -655,9 +651,7 @@ void WiredTigerRecoveryUnit::setDurableTimestamp(Timestamp timestamp) {
invariant(
_durableTimestamp.isNull(),
str::stream() << "Trying to reset durable timestamp when it was already set. wasSetTo: "
- << _durableTimestamp.toString()
- << " setTo: "
- << timestamp.toString());
+ << _durableTimestamp.toString() << " setTo: " << timestamp.toString());
_durableTimestamp = timestamp;
}
@@ -681,16 +675,13 @@ void WiredTigerRecoveryUnit::setPrepareTimestamp(Timestamp timestamp) {
invariant(_inUnitOfWork(), toString(_getState()));
invariant(_prepareTimestamp.isNull(),
str::stream() << "Trying to set prepare timestamp to " << timestamp.toString()
- << ". It's already set to "
- << _prepareTimestamp.toString());
+ << ". It's already set to " << _prepareTimestamp.toString());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp is " << _commitTimestamp.toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
_prepareTimestamp = timestamp;
}
@@ -730,8 +721,7 @@ void WiredTigerRecoveryUnit::setRoundUpPreparedTimestamps(bool value) {
// This cannot be called after WiredTigerRecoveryUnit::_txnOpen.
invariant(!_isActive(),
str::stream() << "Can't change round up prepared timestamps flag "
- << "when current state is "
- << toString(_getState()));
+ << "when current state is " << toString(_getState()));
_roundUpPreparedTimestamps =
(value) ? RoundUpPreparedTimestamps::kRound : RoundUpPreparedTimestamps::kNoRound;
}
@@ -744,8 +734,7 @@ void WiredTigerRecoveryUnit::setTimestampReadSource(ReadSource readSource,
invariant(!_isActive() || _timestampReadSource == readSource,
str::stream() << "Current state: " << toString(_getState())
<< ". Invalid internal state while setting timestamp read source: "
- << static_cast<int>(readSource)
- << ", provided timestamp: "
+ << static_cast<int>(readSource) << ", provided timestamp: "
<< (provided ? provided->toString() : "none"));
invariant(!provided == (readSource != ReadSource::kProvided));
invariant(!(provided && provided->isNull()));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index be6d00fc27f..065e75e3661 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -60,7 +60,7 @@ public:
false, // .ephemeral
false, // .repair
false // .readOnly
- ) {
+ ) {
repl::ReplicationCoordinator::set(
getGlobalServiceContext(),
std::unique_ptr<repl::ReplicationCoordinator>(new repl::ReplicationCoordinatorMock(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index ace8580f465..afb2da1fbed 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -359,4 +359,4 @@ typedef std::unique_ptr<WiredTigerSession,
UniqueWiredTigerSession;
extern const std::string kWTRepairMsg;
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index b9096b29279..5db2a4e72bc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -103,4 +103,4 @@ private:
mutable stdx::mutex _bufferMutex; // Guards _buffer
Buffer _buffer;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index c5f2fc17651..75c9777a502 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -98,4 +98,4 @@ private:
mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
boost::optional<Timestamp> _localSnapshot;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
index 0185b30fadd..f03fc201827 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
@@ -76,12 +76,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 1f790206918..2607ce3ce45 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -63,9 +63,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class WiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index b00bb678eef..7ff8d2af36a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -187,9 +187,7 @@ Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
if (keysSeen.count(key)) {
return Status(ErrorCodes::Error(50998),
str::stream() << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '"
- << key
- << "'.");
+ << "Found multiple instances of key '" << key << "'.");
}
keysSeen.insert(key);
@@ -265,9 +263,7 @@ StatusWith<int64_t> WiredTigerUtil::checkApplicationMetadataFormatVersion(Operat
if (version < minimumVersion || version > maximumVersion) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Application metadata for " << uri
- << " has unsupported format version: "
- << version
- << ".");
+ << " has unsupported format version: " << version << ".");
}
LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
@@ -320,8 +316,7 @@ StatusWith<int64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
if (ret != 0) {
return StatusWith<int64_t>(ErrorCodes::CursorNotFound,
str::stream() << "unable to open cursor at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ << ". reason: " << wiredtiger_strerror(ret));
}
invariant(cursor);
ON_BLOCK_EXIT([&] { cursor->close(cursor); });
@@ -329,21 +324,19 @@ StatusWith<int64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
if (ret != 0) {
- return StatusWith<int64_t>(
- ErrorCodes::NoSuchKey,
- str::stream() << "unable to find key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<int64_t>(ErrorCodes::NoSuchKey,
+ str::stream()
+ << "unable to find key " << statisticsKey << " at URI "
+ << uri << ". reason: " << wiredtiger_strerror(ret));
}
int64_t value;
ret = cursor->get_value(cursor, nullptr, nullptr, &value);
if (ret != 0) {
- return StatusWith<int64_t>(
- ErrorCodes::BadValue,
- str::stream() << "unable to get value for key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<int64_t>(ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey
+ << " at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
return StatusWith<int64_t>(value);
@@ -461,7 +454,7 @@ WT_EVENT_HANDLER defaultEventHandlers() {
handlers.handle_progress = mdb_handle_progress;
return handlers;
}
-}
+} // namespace
WiredTigerEventHandler::WiredTigerEventHandler() {
WT_EVENT_HANDLER* handler = static_cast<WT_EVENT_HANDLER*>(this);
@@ -577,8 +570,7 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u
// Sanity check against a table having multiple logging specifications.
invariant(false,
str::stream() << "Table has contradictory logging settings. Uri: " << uri
- << " Conf: "
- << existingMetadata);
+ << " Conf: " << existingMetadata);
}
if (existingMetadata.find(setting) != std::string::npos) {
@@ -617,8 +609,8 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
int ret = session->open_cursor(session, uri.c_str(), nullptr, cursorConfig, &c);
if (ret != 0) {
return Status(ErrorCodes::CursorNotFound,
- str::stream() << "unable to open cursor at URI " << uri << ". reason: "
- << wiredtiger_strerror(ret));
+ str::stream() << "unable to open cursor at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
bob->append("uri", uri);
invariant(c);
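Aside: the getStatisticsValue hunks above lean on ON_BLOCK_EXIT to close the WiredTiger cursor on every return path, which is why each early return only has to build its error message. A generic scope-guard sketch of the same idea (assumed names; not MongoDB's implementation):

#include <utility>

template <typename F>
struct ScopeGuard {
    F fn;
    ~ScopeGuard() { fn(); }  // runs when the guard leaves scope
};

template <typename F>
ScopeGuard<F> makeGuard(F fn) {
    return ScopeGuard<F>{std::move(fn)};
}

int main() {
    int closed = 0;
    {
        auto guard = makeGuard([&] { ++closed; });  // cleanup registered here
    }  // guard destroyed; cleanup fires exactly once
    return closed == 1 ? 0 : 1;
}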
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index e0e475b2230..47433ac4d38 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -73,20 +73,16 @@ const NamespaceString sessionCollectionNamespace("config.system.sessions");
MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(
- AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
- v3SystemRolesKeyPattern = BSON(
- AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::USER_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
v3SystemRolesIndexName =
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
v3SystemUsersIndexSpec.addKeys(v3SystemUsersKeyPattern);
v3SystemUsersIndexSpec.unique();
diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp
index 18fa2baf7dd..b6de5022d0b 100644
--- a/src/mongo/db/traffic_reader.cpp
+++ b/src/mongo/db/traffic_reader.cpp
@@ -93,8 +93,8 @@ bool readBytes(size_t toRead, char* buf, int fd) {
auto pair = errnoAndDescription();
uassert(ErrorCodes::FileStreamFailed,
- str::stream() << "failed to read bytes: errno(" << pair.first << ") : "
- << pair.second,
+ str::stream() << "failed to read bytes: errno(" << pair.first
+ << ") : " << pair.second,
pair.first == EINTR);
continue;
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 17f4756cce9..4252cc1cfb5 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -100,7 +100,7 @@ public:
}
void run() {
- _thread = stdx::thread([ consumer = std::move(_pcqPipe.consumer), this ] {
+ _thread = stdx::thread([consumer = std::move(_pcqPipe.consumer), this] {
try {
DataBuilder db;
std::fstream out(_path,
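Aside: the hunk above drops the spaces older clang-format put inside an init-capture list, so [ consumer = std::move(...), this ] becomes [consumer = std::move(...), this]. A self-contained sketch of the same move-capture-into-a-thread pattern (assumed names, not code from this patch):

#include <memory>
#include <thread>
#include <utility>

int main() {
    auto consumer = std::make_unique<int>(7);
    // Init-capture sits flush against the brackets, as the reformatted lambda has it.
    std::thread worker([consumer = std::move(consumer)] {
        (void)*consumer;  // the thread now owns the moved-in resource
    });
    worker.join();
}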
diff --git a/src/mongo/db/traffic_recorder_validators.cpp b/src/mongo/db/traffic_recorder_validators.cpp
index 918784563d2..c9c48501e8d 100644
--- a/src/mongo/db/traffic_recorder_validators.cpp
+++ b/src/mongo/db/traffic_recorder_validators.cpp
@@ -38,8 +38,8 @@ namespace mongo {
Status validateTrafficRecordDestination(const std::string& path) {
if (!path.empty() && !boost::filesystem::is_directory(path)) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "traffic recording directory \"" << path
- << "\" is not a directory.");
+ str::stream()
+ << "traffic recording directory \"" << path << "\" is not a directory.");
}
return Status::OK();
diff --git a/src/mongo/db/transaction_history_iterator.cpp b/src/mongo/db/transaction_history_iterator.cpp
index 81c8095eeac..82bbaa46eb2 100644
--- a/src/mongo/db/transaction_history_iterator.cpp
+++ b/src/mongo/db/transaction_history_iterator.cpp
@@ -95,8 +95,7 @@ BSONObj findOneOplogEntry(OperationContext* opCtx,
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toBSON()
- << " cannot be found",
+ << opTime.toBSON() << " cannot be found",
getNextResult != PlanExecutor::IS_EOF);
if (getNextResult != PlanExecutor::ADVANCED) {
uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(oplogBSON),
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 46d2a49a6c0..a258ed1833d 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -419,8 +419,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
TxnNumber txnNumber) {
uassert(ErrorCodes::NoSuchTransaction,
str::stream()
- << "Given transaction number "
- << txnNumber
+ << "Given transaction number " << txnNumber
<< " does not match any in-progress transactions. The active transaction number is "
<< o().activeTxnNumber,
txnNumber == o().activeTxnNumber && !o().txnState.isInRetryableWriteMode());
@@ -442,8 +441,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
uasserted(
ErrorCodes::NoSuchTransaction,
str::stream()
- << "Transaction "
- << txnNumber
+ << "Transaction " << txnNumber
<< " has been aborted because an earlier command in this transaction failed.");
}
return;
@@ -503,9 +501,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
uassert(ErrorCodes::TransactionTooOld,
str::stream() << "Cannot start transaction " << txnNumber << " on session "
- << _sessionId()
- << " because a newer transaction "
- << o().activeTxnNumber
+ << _sessionId() << " because a newer transaction " << o().activeTxnNumber
<< " has already started.",
txnNumber >= o().activeTxnNumber);
@@ -552,8 +548,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
TransactionState::kNone | TransactionState::kAbortedWithoutPrepare;
uassert(50911,
str::stream() << "Cannot start a transaction at given transaction number "
- << txnNumber
- << " a transaction with the same number is in state "
+ << txnNumber << " a transaction with the same number is in state "
<< o().txnState,
o().txnState.isInSet(restartableStates));
}
@@ -1087,8 +1082,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
uassert(ErrorCodes::OperationNotSupportedInTransaction,
str::stream() << "prepareTransaction failed because one of the transaction "
"operations was done against a temporary collection '"
- << collection->ns()
- << "'.",
+ << collection->ns() << "'.",
!collection->isTemporary(opCtx));
}
@@ -1394,8 +1388,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
str::stream() << "Commit oplog entry must be greater than or equal to commit "
"timestamp due to causal consistency. commit timestamp: "
<< commitTimestamp.toBSON()
- << ", commit oplog entry optime: "
- << commitOplogSlot.toBSON());
+ << ", commit oplog entry optime: " << commitOplogSlot.toBSON());
} else {
// We always expect a non-null commitOplogEntryOpTime to be passed in on secondaries
// in order to set the finishOpTime.
@@ -1847,8 +1840,7 @@ void TransactionParticipant::TransactionState::transitionTo(StateFlag newState,
if (shouldValidate == TransitionValidation::kValidateTransition) {
invariant(TransactionState::_isLegalTransition(_state, newState),
str::stream() << "Current state: " << toString(_state)
- << ", Illegal attempted next state: "
- << toString(newState));
+ << ", Illegal attempted next state: " << toString(newState));
}
// If we are transitioning out of prepare, signal waiters by fulfilling the completion promise.
@@ -2186,9 +2178,7 @@ boost::optional<repl::OpTime> TransactionParticipant::Participant::_checkStateme
if (it == p().activeTxnCommittedStatements.end()) {
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "Incomplete history detected for transaction "
- << o().activeTxnNumber
- << " on session "
- << _sessionId(),
+ << o().activeTxnNumber << " on session " << _sessionId(),
!p().hasIncompleteHistory);
return boost::none;
@@ -2212,45 +2202,45 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
OperationContext* opCtx,
std::vector<StmtId> stmtIdsWritten,
const repl::OpTime& lastStmtIdWriteOpTime) {
- opCtx->recoveryUnit()->onCommit(
- [ opCtx, stmtIdsWritten = std::move(stmtIdsWritten), lastStmtIdWriteOpTime ](
- boost::optional<Timestamp>) {
- TransactionParticipant::Participant participant(opCtx);
- invariant(participant.p().isValid);
-
- RetryableWritesStats::get(opCtx->getServiceContext())
- ->incrementTransactionsCollectionWriteCount();
-
- stdx::lock_guard<Client> lg(*opCtx->getClient());
-
- // The cache of the last written record must always be advanced after a write so that
- // subsequent writes have the correct point to start from.
- participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
-
- for (const auto stmtId : stmtIdsWritten) {
- if (stmtId == kIncompleteHistoryStmtId) {
- participant.p().hasIncompleteHistory = true;
- continue;
- }
-
- const auto insertRes = participant.p().activeTxnCommittedStatements.emplace(
- stmtId, lastStmtIdWriteOpTime);
- if (!insertRes.second) {
- const auto& existingOpTime = insertRes.first->second;
- fassertOnRepeatedExecution(participant._sessionId(),
- participant.o().activeTxnNumber,
- stmtId,
- existingOpTime,
- lastStmtIdWriteOpTime);
- }
+ opCtx->recoveryUnit()->onCommit([opCtx,
+ stmtIdsWritten = std::move(stmtIdsWritten),
+ lastStmtIdWriteOpTime](boost::optional<Timestamp>) {
+ TransactionParticipant::Participant participant(opCtx);
+ invariant(participant.p().isValid);
+
+ RetryableWritesStats::get(opCtx->getServiceContext())
+ ->incrementTransactionsCollectionWriteCount();
+
+ stdx::lock_guard<Client> lg(*opCtx->getClient());
+
+ // The cache of the last written record must always be advanced after a write so that
+ // subsequent writes have the correct point to start from.
+ participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
+
+ for (const auto stmtId : stmtIdsWritten) {
+ if (stmtId == kIncompleteHistoryStmtId) {
+ participant.p().hasIncompleteHistory = true;
+ continue;
}
- // If this is the first time executing a retryable write, we should indicate that to
- // the transaction participant.
- if (participant.o(lg).txnState.isNone()) {
- participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ const auto insertRes =
+ participant.p().activeTxnCommittedStatements.emplace(stmtId, lastStmtIdWriteOpTime);
+ if (!insertRes.second) {
+ const auto& existingOpTime = insertRes.first->second;
+ fassertOnRepeatedExecution(participant._sessionId(),
+ participant.o().activeTxnNumber,
+ stmtId,
+ existingOpTime,
+ lastStmtIdWriteOpTime);
}
- });
+ }
+
+ // If this is the first time executing a retryable write, we should indicate that to
+ // the transaction participant.
+ if (participant.o(lg).txnState.isNone()) {
+ participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ }
+ });
MONGO_FAIL_POINT_BLOCK(onPrimaryTransactionalWrite, customArgs) {
const auto& data = customArgs.getData();
@@ -2264,9 +2254,9 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
if (!failBeforeCommitExceptionElem.eoo()) {
const auto failureCode = ErrorCodes::Error(int(failBeforeCommitExceptionElem.Number()));
uasserted(failureCode,
- str::stream() << "Failing write for " << _sessionId() << ":"
- << o().activeTxnNumber
- << " due to failpoint. The write must not be reflected.");
+ str::stream()
+ << "Failing write for " << _sessionId() << ":" << o().activeTxnNumber
+ << " due to failpoint. The write must not be reflected.");
}
}
}
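Aside: the large hunk above only re-wraps and re-indents the onCommit handler; the move-capture semantics are unchanged. A self-contained sketch of registering a commit hook that takes ownership via an init-capture (CommitHooks is a hypothetical stand-in for the RecoveryUnit interface, with the timestamp argument omitted):

#include <functional>
#include <utility>
#include <vector>

struct CommitHooks {
    std::vector<std::function<void()>> hooks;
    void onCommit(std::function<void()> fn) { hooks.push_back(std::move(fn)); }
    void commit() {
        for (auto& fn : hooks) fn();  // run handlers in registration order
    }
};

int main() {
    CommitHooks ru;
    std::vector<int> stmtIdsWritten{1, 2, 3};
    // The handler owns the ids after the move-capture, mirroring
    // `stmtIdsWritten = std::move(stmtIdsWritten)` in the diff.
    ru.onCommit([stmtIdsWritten = std::move(stmtIdsWritten)] {
        (void)stmtIdsWritten.size();
    });
    ru.commit();
}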
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 646431735da..58ecad234af 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -375,11 +375,11 @@ TEST_F(TxnParticipantTest, StashAndUnstashResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -1153,20 +1153,19 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr
auto guard = makeGuard([&]() { OperationContextSession::checkOut(opCtx()); });
// Try to start a new transaction while there is already a prepared transaction on the
// session. This should fail with a PreparedTransactionInProgress error.
- runFunctionFromDifferentOpCtx([
- lsid = *opCtx()->getLogicalSessionId(),
- txnNumberToStart = *opCtx()->getTxnNumber() + 1
- ](OperationContext * newOpCtx) {
- newOpCtx->setLogicalSessionId(lsid);
- newOpCtx->setTxnNumber(txnNumberToStart);
-
- MongoDOperationContextSession ocs(newOpCtx);
- auto txnParticipant = TransactionParticipant::get(newOpCtx);
- ASSERT_THROWS_CODE(
- txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
- AssertionException,
- ErrorCodes::PreparedTransactionInProgress);
- });
+ runFunctionFromDifferentOpCtx(
+ [lsid = *opCtx()->getLogicalSessionId(),
+ txnNumberToStart = *opCtx()->getTxnNumber() + 1](OperationContext* newOpCtx) {
+ newOpCtx->setLogicalSessionId(lsid);
+ newOpCtx->setTxnNumber(txnNumberToStart);
+
+ MongoDOperationContextSession ocs(newOpCtx);
+ auto txnParticipant = TransactionParticipant::get(newOpCtx);
+ ASSERT_THROWS_CODE(
+ txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
+ AssertionException,
+ ErrorCodes::PreparedTransactionInProgress);
+ });
}
ASSERT_FALSE(txnParticipant.transactionIsAborted());
@@ -1277,11 +1276,11 @@ TEST_F(TxnParticipantTest, StashInNestedSessionIsANoop) {
// Set the readConcern on the OperationContext.
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash, which sets up a WriteUnitOfWork.
@@ -2671,11 +2670,11 @@ TEST_F(TransactionsMetricsTest, ReportStashedResources) {
std::move(clientMetadata.getValue()));
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -2758,11 +2757,11 @@ TEST_F(TransactionsMetricsTest, ReportUnstashedResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -3107,11 +3106,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3147,11 +3146,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3189,11 +3188,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3230,11 +3229,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Prepare the transaction and extend the duration in the prepared state.
@@ -3268,11 +3267,11 @@ DEATH_TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogWithNoLockerInfoS
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3292,11 +3291,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3330,11 +3329,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3367,11 +3366,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3410,11 +3409,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3456,11 +3455,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterExceptionInPrepare) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3503,11 +3502,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowStashedAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3588,11 +3587,11 @@ TEST_F(TxnParticipantTest, RollbackResetsInMemoryStateOfPreparedTransaction) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
diff --git a/src/mongo/db/update/addtoset_node.cpp b/src/mongo/db/update/addtoset_node.cpp
index 4805ae5c825..b12c4ceeb9b 100644
--- a/src/mongo/db/update/addtoset_node.cpp
+++ b/src/mongo/db/update/addtoset_node.cpp
@@ -108,8 +108,7 @@ ModifierNode::ModifyResult AddToSetNode::updateExistingElement(
mutablebson::Element* element, std::shared_ptr<FieldRef> elementPath) const {
uassert(ErrorCodes::BadValue,
str::stream() << "Cannot apply $addToSet to non-array field. Field named '"
- << element->getFieldName()
- << "' has non-array type "
+ << element->getFieldName() << "' has non-array type "
<< typeName(element->getType()),
element->getType() == BSONType::Array);
diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp
index 9c3bfc283a5..0aaf434fcdb 100644
--- a/src/mongo/db/update/addtoset_node_test.cpp
+++ b/src/mongo/db/update/addtoset_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using AddToSetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(AddToSetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$addToSet: {}}");
diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp
index 304b0261e90..58c0d4a27ab 100644
--- a/src/mongo/db/update/arithmetic_node.cpp
+++ b/src/mongo/db/update/arithmetic_node.cpp
@@ -55,9 +55,7 @@ Status ArithmeticNode::init(BSONElement modExpr,
if (!modExpr.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot " << getNameForOp(_op)
- << " with non-numeric argument: {"
- << modExpr
- << "}");
+ << " with non-numeric argument: {" << modExpr << "}");
}
_val = modExpr;
@@ -72,10 +70,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
str::stream() << "Cannot apply " << operatorName()
<< " to a value of non-numeric type. {"
<< (idElem.ok() ? idElem.toString() : "no id")
- << "} has the field '"
- << element->getFieldName()
- << "' of non-numeric type "
- << typeName(element->getType()));
+ << "} has the field '" << element->getFieldName()
+ << "' of non-numeric type " << typeName(element->getType()));
}
SafeNum originalValue = element->getValueSafeNum();
@@ -97,10 +93,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
auto idElem = mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Failed to apply " << operatorName()
- << " operations to current value ("
- << originalValue.debugString()
- << ") for document {"
- << (idElem.ok() ? idElem.toString() : "no id")
+ << " operations to current value (" << originalValue.debugString()
+ << ") for document {" << (idElem.ok() ? idElem.toString() : "no id")
<< "}");
} else {
invariant(element->setValueSafeNum(valueToSet));
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index d18cc4f1314..2783a32d547 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using ArithmeticNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(ArithmeticNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$inc: {}}");
diff --git a/src/mongo/db/update/bit_node.cpp b/src/mongo/db/update/bit_node.cpp
index 67a334970fc..19f7a560846 100644
--- a/src/mongo/db/update/bit_node.cpp
+++ b/src/mongo/db/update/bit_node.cpp
@@ -60,9 +60,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName
- << "' which is an unknown operator: {"
- << curOp
+ << payloadFieldName << "' which is an unknown operator: {" << curOp
<< "}");
}
@@ -70,9 +68,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type())
- << "' is not supported here: {"
- << curOp
+ << typeName(curOp.type()) << "' is not supported here: {" << curOp
<< "}");
}
@@ -97,11 +93,8 @@ ModifierNode::ModifyResult BitNode::updateExistingElement(
mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString()
- << " has the field "
- << element->getFieldName()
- << " of non-integer type "
- << typeName(element->getType()));
+ << idElem.toString() << " has the field " << element->getFieldName()
+ << " of non-integer type " << typeName(element->getType()));
}
SafeNum value = applyOpList(element->getValueSafeNum());
diff --git a/src/mongo/db/update/bit_node.h b/src/mongo/db/update/bit_node.h
index 07812b3e08a..a2d51dadb4d 100644
--- a/src/mongo/db/update/bit_node.h
+++ b/src/mongo/db/update/bit_node.h
@@ -72,7 +72,7 @@ private:
BSONObjBuilder bob;
{
BSONObjBuilder subBuilder(bob.subobjStart(""));
- for (const auto[bitOperator, operand] : _opList) {
+ for (const auto [bitOperator, operand] : _opList) {
operand.toBSON(
[](SafeNum (SafeNum::*bitOperator)(const SafeNum&) const) {
if (bitOperator == &SafeNum::bitAnd)
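Aside: the one-character change above adds the space clang-format now requires between `auto` and a structured-binding list. A standalone sketch of the idiom (illustrative names only):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
    const std::vector<std::pair<char, int>> opList{{'&', 3}, {'|', 5}};
    // `const auto [a, b]` copies each pair and unpacks it per iteration.
    for (const auto [bitOperator, operand] : opList)
        std::printf("%c %d\n", bitOperator, operand);
}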
diff --git a/src/mongo/db/update/bit_node_test.cpp b/src/mongo/db/update/bit_node_test.cpp
index 78734dd63dd..488ad971e5d 100644
--- a/src/mongo/db/update/bit_node_test.cpp
+++ b/src/mongo/db/update/bit_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using BitNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(BitNodeTest, InitWithDoubleFails) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -292,4 +292,4 @@ TEST_F(BitNodeTest, ApplyRepeatedBitOps) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp
index b500701cf2d..05c5d9ee68a 100644
--- a/src/mongo/db/update/compare_node_test.cpp
+++ b/src/mongo/db/update/compare_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {

using CompareNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

DEATH_TEST(CompareNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$max: {}}");
diff --git a/src/mongo/db/update/current_date_node_test.cpp b/src/mongo/db/update/current_date_node_test.cpp
index 7bd11c9140b..e16a2cdbe46 100644
--- a/src/mongo/db/update/current_date_node_test.cpp
+++ b/src/mongo/db/update/current_date_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {

using CurrentDateNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

DEATH_TEST(CurrentDateNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$currentDate: {}}");
@@ -286,4 +286,4 @@ TEST_F(CurrentDateNodeTest, ApplyNoIndexDataOrLogBuilder) {
}
} // namespace
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/update/field_checker_test.cpp b/src/mongo/db/update/field_checker_test.cpp
index 99deff9fb07..d95b2bc681f 100644
--- a/src/mongo/db/update/field_checker_test.cpp
+++ b/src/mongo/db/update/field_checker_test.cpp
@@ -38,9 +38,9 @@ namespace {
using mongo::ErrorCodes;
using mongo::FieldRef;
-using mongo::fieldchecker::isUpdatable;
-using mongo::fieldchecker::isPositional;
using mongo::Status;
+using mongo::fieldchecker::isPositional;
+using mongo::fieldchecker::isUpdatable;
TEST(IsUpdatable, Basics) {
FieldRef fieldRef("x");
diff --git a/src/mongo/db/update/log_builder.cpp b/src/mongo/db/update/log_builder.cpp
index e78cd295b1f..5fbd6514791 100644
--- a/src/mongo/db/update/log_builder.cpp
+++ b/src/mongo/db/update/log_builder.cpp
@@ -89,11 +89,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson:
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.getFieldName()
- << "' of type "
- << typeName(val.getType()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.getFieldName() << "' of type " << typeName(val.getType()));
return addToSets(elemToSet);
}
@@ -102,11 +100,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement&
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.fieldName()
- << "' of type "
- << typeName(val.type()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.fieldName() << "' of type " << typeName(val.type()));
return addToSets(elemToSet);
}
diff --git a/src/mongo/db/update/modifier_node.cpp b/src/mongo/db/update/modifier_node.cpp
index 674a2d8e361..dd0341255a3 100644
--- a/src/mongo/db/update/modifier_node.cpp
+++ b/src/mongo/db/update/modifier_node.cpp
@@ -66,10 +66,8 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
if (prefixSize == (*immutablePath)->numParts()) {
uasserted(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '" << pathTaken->dottedField() << "' to "
- << element.toString()
- << " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'");
+ << element.toString() << " would modify the immutable field '"
+ << (*immutablePath)->dottedField() << "'");
}
// If 'pathTaken' is a strict prefix of 'immutablePath', then we may have modified
@@ -106,8 +104,7 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the immutable field '"
<< (*immutablePath)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
@@ -137,8 +134,7 @@ void checkImmutablePathsNotModified(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "Performing an update on the path '" << pathTaken->dottedField()
<< "' would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
pathTaken->commonPrefixSize(**immutablePath) <
std::min(pathTaken->numParts(), (*immutablePath)->numParts()));
}
@@ -265,12 +261,10 @@ UpdateExecutor::ApplyResult ModifierNode::applyToNonexistentElement(
// because we just created this element.)
uassert(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '"
- << updateNodeApplyParams.pathTaken->dottedField()
- << "' to "
+ << updateNodeApplyParams.pathTaken->dottedField() << "' to "
<< applyParams.element.toString()
<< " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
updateNodeApplyParams.pathTaken->commonPrefixSize(**immutablePath) !=
(*immutablePath)->numParts());
}
diff --git a/src/mongo/db/update/object_replace_executor.cpp b/src/mongo/db/update/object_replace_executor.cpp
index 8a65cd1b0ca..31ea35df114 100644
--- a/src/mongo/db/update/object_replace_executor.cpp
+++ b/src/mongo/db/update/object_replace_executor.cpp
@@ -136,8 +136,7 @@ UpdateExecutor::ApplyResult ObjectReplaceExecutor::applyReplacementUpdate(
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the (immutable) field '"
<< (*path)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
diff --git a/src/mongo/db/update/object_replace_executor_test.cpp b/src/mongo/db/update/object_replace_executor_test.cpp
index cef054fd289..6b0d93f6e46 100644
--- a/src/mongo/db/update/object_replace_executor_test.cpp
+++ b/src/mongo/db/update/object_replace_executor_test.cpp
@@ -42,8 +42,8 @@ namespace mongo {
namespace {

using ObjectReplaceExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST_F(ObjectReplaceExecutorTest, Noop) {
auto obj = fromjson("{a: 1, b: 2}");
diff --git a/src/mongo/db/update/path_support.cpp b/src/mongo/db/update/path_support.cpp
index 55f369164a8..a419ee2f457 100644
--- a/src/mongo/db/update/path_support.cpp
+++ b/src/mongo/db/update/path_support.cpp
@@ -53,8 +53,8 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
if (toPad > kMaxPaddingAllowed) {
return Status(ErrorCodes::CannotBackfillArray,
- str::stream() << "can't backfill more than " << kMaxPaddingAllowed
- << " elements");
+ str::stream()
+ << "can't backfill more than " << kMaxPaddingAllowed << " elements");
}
for (size_t i = 0; i < toPad; i++) {
@@ -128,10 +128,8 @@ Status findLongestPrefix(const FieldRef& prefix,
*elemFound = prev;
return Status(ErrorCodes::PathNotViable,
str::stream() << "cannot use the part (" << prefix.getPart(i - 1) << " of "
- << prefix.dottedField()
- << ") to traverse the element ({"
- << curr.toString()
- << "})");
+ << prefix.dottedField() << ") to traverse the element ({"
+ << curr.toString() << "})");
} else if (curr.ok()) {
*idxFound = i - 1;
*elemFound = curr;
@@ -153,9 +151,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (elemFound.getType() != BSONType::Object && elemFound.getType() != BSONType::Array) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
// Sanity check that 'idxField' is an actual part.
@@ -175,9 +171,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (!newIdx) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
status = maybePadTo(&elemFound, *newIdx);
diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp
index ecd2fa9bab5..33300f956af 100644
--- a/src/mongo/db/update/path_support_test.cpp
+++ b/src/mongo/db/update/path_support_test.cpp
@@ -57,10 +57,10 @@ namespace {

using namespace mongo;
using namespace pathsupport;
-using str::stream;
using mutablebson::Element;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
+using str::stream;

class EmptyDoc : public mongo::unittest::Test {
public:
@@ -606,9 +606,7 @@ static void assertContains(const EqualityMatches& equalities, const BSONObj& wra
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(it->second->getData() != value)) {
FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData()
- << ", not value "
- << value);
+ << it->second->getData() << ", not value " << value);
}
}
@@ -898,19 +896,14 @@ static void assertParent(const EqualityMatches& equalities,
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
if (foundParentPath != parentPath) {
FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \""
- << parentPath
- << "\"");
+ << "\" does not match \"" << parentPath << "\"");
}
BSONElementComparator eltCmp(BSONElementComparator::FieldNamesMode::kIgnore,
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(parentEl != value)) {
FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
- << "\" contains value "
- << parentEl
- << ", not value "
- << value);
+ << "\" contains value " << parentEl << ", not value " << value);
}
}
@@ -930,8 +923,7 @@ static void assertNoParent(const EqualityMatches& equalities, StringData pathStr
if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath
- << "\"");
+ << foundParentPath << "\"");
}
}
diff --git a/src/mongo/db/update/pipeline_executor_test.cpp b/src/mongo/db/update/pipeline_executor_test.cpp
index 2a10c292532..1c5c4297485 100644
--- a/src/mongo/db/update/pipeline_executor_test.cpp
+++ b/src/mongo/db/update/pipeline_executor_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {

using PipelineExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST_F(PipelineExecutorTest, Noop) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
diff --git a/src/mongo/db/update/pop_node.cpp b/src/mongo/db/update/pop_node.cpp
index 35ff39204af..3d4355793f1 100644
--- a/src/mongo/db/update/pop_node.cpp
+++ b/src/mongo/db/update/pop_node.cpp
@@ -54,8 +54,7 @@ ModifierNode::ModifyResult PopNode::updateExistingElement(
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Path '" << elementPath->dottedField()
<< "' contains an element of non-array type '"
- << typeName(element->getType())
- << "'",
+ << typeName(element->getType()) << "'",
element->getType() == BSONType::Array);
if (!element->hasChildren()) {
diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp
index b9092a98927..39f41ba06f1 100644
--- a/src/mongo/db/update/pull_node_test.cpp
+++ b/src/mongo/db/update/pull_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {

using PullNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST(PullNodeTest, InitWithBadMatchExpressionFails) {
auto update = fromjson("{$pull: {a: {b: {$foo: 1}}}}");
diff --git a/src/mongo/db/update/pullall_node.cpp b/src/mongo/db/update/pullall_node.cpp
index e4c0936fa83..e6e9e4570cd 100644
--- a/src/mongo/db/update/pullall_node.cpp
+++ b/src/mongo/db/update/pullall_node.cpp
@@ -48,7 +48,7 @@ public:
bool match(const mutablebson::ConstElement& element) final {
return std::any_of(_elementsToMatch.begin(),
_elementsToMatch.end(),
- [&element, collator{_collator} ](const auto& elementToMatch) {
+ [&element, collator{_collator}](const auto& elementToMatch) {
return element.compareWithBSONElement(
elementToMatch, collator, false) == 0;
});
diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp
index 60b09e7b77d..dd77b411dcf 100644
--- a/src/mongo/db/update/pullall_node_test.cpp
+++ b/src/mongo/db/update/pullall_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {

using PullAllNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST(PullAllNodeTest, InitWithIntFails) {
auto update = fromjson("{$pullAll: {a: 1}}");
diff --git a/src/mongo/db/update/push_node.cpp b/src/mongo/db/update/push_node.cpp
index 6702af4fec3..a4a79fb6e5a 100644
--- a/src/mongo/db/update/push_node.cpp
+++ b/src/mongo/db/update/push_node.cpp
@@ -292,10 +292,8 @@ ModifierNode::ModifyResult PushNode::performPush(mutablebson::Element* element,
uasserted(ErrorCodes::BadValue,
str::stream() << "The field '" << elementPath->dottedField() << "'"
<< " must be an array but is of type "
- << typeName(element->getType())
- << " in document {"
- << (idElem.ok() ? idElem.toString() : "no id")
- << "}");
+ << typeName(element->getType()) << " in document {"
+ << (idElem.ok() ? idElem.toString() : "no id") << "}");
}
auto result = insertElementsWithPosition(element, _position, _valuesToPush);
diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp
index d0ef73e22e5..985ee81ca2c 100644
--- a/src/mongo/db/update/push_node_test.cpp
+++ b/src/mongo/db/update/push_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {

using PushNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST(PushNodeTest, EachClauseWithNonArrayObjectFails) {
auto update = fromjson("{$push: {x: {$each: {'0': 1}}}}");
@@ -670,12 +670,9 @@ void checkDocumentAndResult(BSONObj updateModifier,
FAIL(str::stream() << "apply() failure for " << updateModifier << ". Expected "
<< expectedDocument
<< " (noop = false, indexesAffected = false) but got "
- << actualDocument.toString()
- << " (noop = "
- << (applyResult.noop ? "true" : "false")
- << ", indexesAffected = "
- << (applyResult.indexesAffected ? "true" : "false")
- << ").");
+ << actualDocument.toString() << " (noop = "
+ << (applyResult.noop ? "true" : "false") << ", indexesAffected = "
+ << (applyResult.indexesAffected ? "true" : "false") << ").");
}
}
@@ -828,9 +825,7 @@ TEST_F(PushNodeTest, ApplyToPopulatedArrayWithSortAndSliceValues) {
auto update =
BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(BSON("a" << 2 << "b" << 1)
<< BSON("a" << 1 << "b" << 1))
- << "$slice"
- << data.sliceValue
- << "$sort"
+ << "$slice" << data.sliceValue << "$sort"
<< data.sortOrder)));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
PushNode node;
diff --git a/src/mongo/db/update/rename_node.cpp b/src/mongo/db/update/rename_node.cpp
index bdf968664d0..ba9446c5789 100644
--- a/src/mongo/db/update/rename_node.cpp
+++ b/src/mongo/db/update/rename_node.cpp
@@ -133,8 +133,8 @@ Status RenameNode::init(BSONElement modExpr,
// Though we could treat this as a no-op, it is illegal in the current implementation.
if (fromFieldRef == toFieldRef) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The source and target field for $rename must differ: "
- << modExpr);
+ str::stream()
+ << "The source and target field for $rename must differ: " << modExpr);
}
if (fromFieldRef.isPrefixOf(toFieldRef) || toFieldRef.isPrefixOf(fromFieldRef)) {
@@ -203,12 +203,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
- << fromFieldRef->dottedField()
- << "' in doc with "
+ << fromFieldRef->dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
@@ -225,12 +223,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The destination field cannot be an array element, '"
- << toFieldRef.dottedField()
- << "' in doc with "
+ << toFieldRef.dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
diff --git a/src/mongo/db/update/rename_node_test.cpp b/src/mongo/db/update/rename_node_test.cpp
index 93ddfd61714..6eec4d8f498 100644
--- a/src/mongo/db/update/rename_node_test.cpp
+++ b/src/mongo/db/update/rename_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {

using RenameNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

TEST(RenameNodeTest, PositionalNotAllowedInFromField) {
auto update = fromjson("{$rename: {'a.$': 'b'}}");
@@ -476,8 +476,7 @@ TEST_F(RenameNodeTest, ApplyCanRemoveRequiredPartOfDBRefIfValidateForStorageIsFa
ASSERT_TRUE(result.indexesAffected);
auto updated = BSON("a" << BSON("$ref"
<< "c")
- << "b"
- << 0);
+ << "b" << 0);
ASSERT_EQUALS(updated, doc);
ASSERT_FALSE(doc.isInPlaceModeEnabled());
ASSERT_EQUALS(fromjson("{$set: {'b': 0}, $unset: {'a.$id': true}}"), getLogDoc());
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index f7280e83110..8f160c4fe13 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {

using SetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

DEATH_TEST(SetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$set: {}}");
diff --git a/src/mongo/db/update/storage_validation.cpp b/src/mongo/db/update/storage_validation.cpp
index ce5147f42e2..009343776f0 100644
--- a/src/mongo/db/update/storage_validation.cpp
+++ b/src/mongo/db/update/storage_validation.cpp
@@ -104,8 +104,7 @@ void validateDollarPrefixElement(mutablebson::ConstElement elem) {
// Not an okay, $ prefixed field name.
uasserted(ErrorCodes::DollarPrefixedFieldName,
str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
- << "' in '"
- << mutablebson::getFullName(elem)
+ << "' in '" << mutablebson::getFullName(elem)
<< "' is not valid for storage.");
}
}
diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp
index 346c5e4551c..09788ef573b 100644
--- a/src/mongo/db/update/unset_node_test.cpp
+++ b/src/mongo/db/update/unset_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {

using UnsetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;

DEATH_TEST(UnsetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$unset: {}}");
diff --git a/src/mongo/db/update/update_array_node.h b/src/mongo/db/update/update_array_node.h
index 7d942698953..c6e90c1d9c3 100644
--- a/src/mongo/db/update/update_array_node.h
+++ b/src/mongo/db/update/update_array_node.h
@@ -86,7 +86,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath,
toArrayFilterIdentifier(pathSuffix));
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index 7dab889aa35..8f7024f80c1 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -76,26 +76,21 @@ modifiertable::ModifierType validateMod(BSONElement mod) {
uassert(
ErrorCodes::FailedToParse,
str::stream()
- << "Unknown modifier: "
- << mod.fieldName()
+ << "Unknown modifier: " << mod.fieldName()
<< ". Expected a valid update modifier or pipeline-style update specified as an array",
modType != modifiertable::MOD_UNKNOWN);
uassert(ErrorCodes::FailedToParse,
str::stream() << "Modifiers operate on fields but we found type "
- << typeName(mod.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {"
- << mod
- << "}",
+ << typeName(mod.type()) << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {" << mod << "}",
mod.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
str::stream() << "'" << mod.fieldName()
<< "' is empty. You must specify a field like so: "
"{"
- << mod.fieldName()
- << ": {<field>: ...}}",
+ << mod.fieldName() << ": {<field>: ...}}",
!mod.embeddedObject().isEmpty());
return modType;
@@ -134,8 +129,7 @@ bool parseUpdateExpression(
for (const auto& arrayFilter : arrayFilters) {
uassert(ErrorCodes::FailedToParse,
str::stream() << "The array filter for identifier '" << arrayFilter.first
- << "' was not used in the update "
- << updateExpr,
+ << "' was not used in the update " << updateExpr,
foundIdentifiers.find(arrayFilter.first.toString()) != foundIdentifiers.end());
}
diff --git a/src/mongo/db/update/update_leaf_node.cpp b/src/mongo/db/update/update_leaf_node.cpp
index 5d1f8931b53..b09919772a2 100644
--- a/src/mongo/db/update/update_leaf_node.cpp
+++ b/src/mongo/db/update/update_leaf_node.cpp
@@ -52,13 +52,9 @@ void UpdateLeafNode::checkViability(mutablebson::Element element,
} else {
uasserted(ErrorCodes::PathNotViable,
str::stream() << "Cannot use the part (" << pathToCreate.getPart(0) << ") of ("
- << pathTaken.dottedField()
- << "."
- << pathToCreate.dottedField()
- << ") to traverse the element ({"
- << element.toString()
- << "})");
+ << pathTaken.dottedField() << "." << pathToCreate.dottedField()
+ << ") to traverse the element ({" << element.toString() << "})");
}
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/update/update_object_node.cpp b/src/mongo/db/update/update_object_node.cpp
index 6298b8389c5..27863d15ee1 100644
--- a/src/mongo/db/update/update_object_node.cpp
+++ b/src/mongo/db/update/update_object_node.cpp
@@ -62,8 +62,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot have array filter identifier (i.e. '$[<id>]') "
"element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
auto identifier = field.substr(2, field.size() - 3);
@@ -71,9 +70,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
if (!identifier.empty() && arrayFilters.find(identifier) == arrayFilters.end()) {
return Status(ErrorCodes::BadValue,
str::stream() << "No array filter found for identifier '" << identifier
- << "' in path '"
- << fieldRef.dottedField()
- << "'");
+ << "' in path '" << fieldRef.dottedField() << "'");
}
if (!identifier.empty()) {
@@ -190,7 +187,7 @@ void applyChild(const UpdateNode& child,
BSONObj makeBSONForOperator(const std::vector<std::pair<std::string, BSONObj>>& updatesForOp) {
BSONObjBuilder bob;
- for (const auto & [ path, value ] : updatesForOp)
+ for (const auto& [path, value] : updatesForOp)
bob << path << value.firstElement();
return bob.obj();
}
@@ -228,8 +225,8 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
// be a string value.
if (BSONType::String != modExpr.type()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The 'to' field for $rename must be a string: "
- << modExpr);
+ str::stream()
+ << "The 'to' field for $rename must be a string: " << modExpr);
}
fieldRef.parse(modExpr.valueStringData());
@@ -250,8 +247,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (positional && positionalCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
if (positional && positionalIndex == 0) {
@@ -259,8 +255,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
ErrorCodes::BadValue,
str::stream()
<< "Cannot have positional (i.e. '$') element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
// Construct and initialize the leaf node.
@@ -298,8 +293,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
return Status(ErrorCodes::ConflictingUpdateOperators,
str::stream() << "Updating the path '" << fieldRef.dottedField()
<< "' would create a conflict at '"
- << fieldRef.dottedSubstring(0, i + 1)
- << "'");
+ << fieldRef.dottedSubstring(0, i + 1) << "'");
}
} else {
std::unique_ptr<UpdateInternalNode> ownedChild;
@@ -335,10 +329,9 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (current->getChild(childName)) {
return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Updating the path '" << fieldRef.dottedField()
- << "' would create a conflict at '"
- << fieldRef.dottedField()
- << "'");
+ str::stream()
+ << "Updating the path '" << fieldRef.dottedField()
+ << "' would create a conflict at '" << fieldRef.dottedField() << "'");
}
current->setChild(std::move(childName), std::move(leaf));
@@ -389,12 +382,12 @@ BSONObj UpdateObjectNode::serialize() const {
BSONObjBuilder bob;
- for (const auto & [ pathPrefix, child ] : _children) {
+ for (const auto& [pathPrefix, child] : _children) {
auto path = FieldRef(pathPrefix);
child->produceSerializationMap(&path, &operatorOrientedUpdates);
}
- for (const auto & [ op, updates ] : operatorOrientedUpdates)
+ for (const auto& [op, updates] : operatorOrientedUpdates)
bob << op << makeBSONForOperator(updates);
return bob.obj();
diff --git a/src/mongo/db/update/update_object_node.h b/src/mongo/db/update/update_object_node.h
index 6f9bed7357a..d7f2e56e9de 100644
--- a/src/mongo/db/update/update_object_node.h
+++ b/src/mongo/db/update/update_object_node.h
@@ -111,7 +111,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath, pathSuffix);
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
}
diff --git a/src/mongo/db/update/update_serialization_test.cpp b/src/mongo/db/update/update_serialization_test.cpp
index 046efec9825..89ae2ac03c4 100644
--- a/src/mongo/db/update/update_serialization_test.cpp
+++ b/src/mongo/db/update/update_serialization_test.cpp
@@ -248,4 +248,4 @@ TEST(UpdateSerialization, CompoundStatementsSerialize) {
}
} // namespace
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 539fcc27b67..8aad16e5552 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -112,4 +112,4 @@ FieldRef UpdateIndexData::getCanonicalIndexField(const FieldRef& path) {
return buf;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.h b/src/mongo/db/update_index_data.h
index aee2c968742..9477eab10e1 100644
--- a/src/mongo/db/update_index_data.h
+++ b/src/mongo/db/update_index_data.h
@@ -83,4 +83,4 @@ private:
bool _allPathsIndexed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data_test.cpp b/src/mongo/db/update_index_data_test.cpp
index c55f0235d09..ae230e70f30 100644
--- a/src/mongo/db/update_index_data_test.cpp
+++ b/src/mongo/db/update_index_data_test.cpp
@@ -129,4 +129,4 @@ TEST(UpdateIndexDataTest, CanonicalIndexFieldForNestedNumericFieldNames) {
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.0.b.1.2")), FieldRef("a.b"_sd));
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.01.02.b.c")), FieldRef("a"_sd));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 193f7a6f432..3d969308c9a 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -170,9 +170,7 @@ BSONObj DurableViewCatalogImpl::_validateViewDefinition(OperationContext* opCtx,
uassert(ErrorCodes::InvalidViewDefinition,
str::stream() << "found invalid view definition " << viewDefinition["_id"]
- << " while reading '"
- << _db->getSystemViewsName()
- << "'",
+ << " while reading '" << _db->getSystemViewsName() << "'",
valid);
return viewDefinition;
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index b15ccab582d..a4b5111419a 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -57,9 +57,8 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmpty
AggregationRequest requestOnView{viewNss, emptyPipeline};
auto result = resolvedView.asExpandedViewAggregation(requestOnView);
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument);
+ BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray()
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -72,8 +71,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipel
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
- << "cursor"
- << kDefaultCursorOptionDocument);
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -216,9 +214,8 @@ TEST(ResolvedViewTest, FromBSONFailsOnInvalidPipelineType) {
}
TEST(ResolvedViewTest, FromBSONFailsOnInvalidCollationType) {
- BSONObj badCmdResponse =
- BSON("resolvedView" << BSON(
- "ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation" << 1));
+ BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation" << 1));
ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40639);
}
@@ -234,10 +231,10 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesEmptyBSONArrayIntoEmptyVector)
}
TEST(ResolvedViewTest, FromBSONSuccessfullyParsesCollation) {
- BSONObj cmdResponse = BSON(
- "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation"
- << BSON("locale"
- << "fil")));
+ BSONObj cmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation"
+ << BSON("locale"
+ << "fil")));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
ASSERT(std::equal(emptyPipeline.begin(),
@@ -257,8 +254,7 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesPopulatedBSONArrayIntoVector) {
BSONArray pipeline = BSON_ARRAY(matchStage << sortStage << limitStage);
BSONObj cmdResponse = BSON("resolvedView" << BSON("ns"
<< "testdb.testcoll"
- << "pipeline"
- << pipeline));
+ << "pipeline" << pipeline));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
@@ -274,8 +270,7 @@ TEST(ResolvedViewTest, IsResolvedViewErrorResponseDetectsKickbackErrorCodeSucces
BSONObj errorResponse =
BSON("ok" << 0 << "code" << ErrorCodes::CommandOnShardedViewNotSupportedOnMongod << "errmsg"
<< "This view is sharded and cannot be run on mongod"
- << "resolvedView"
- << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
+ << "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
auto status = getStatusFromCommandResult(errorResponse);
ASSERT_EQ(status, ErrorCodes::CommandOnShardedViewNotSupportedOnMongod);
ASSERT(status.extraInfo<ResolvedView>());
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 237a9495cf2..6019a012b1a 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -115,8 +115,7 @@ Status ViewCatalog::_reload(WithLock,
return Status(ErrorCodes::InvalidViewDefinition,
str::stream() << "View 'pipeline' entries must be objects, but "
<< viewName.toString()
- << " has a pipeline element of type "
- << stage.type());
+ << " has a pipeline element of type " << stage.type());
}
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 87aa9340ba8..7eaab9fc6c7 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -256,8 +256,7 @@ TEST_F(ViewCatalogFixture, CanCreateViewWithLookupUsingPipelineSyntax) {
<< "fcoll"
<< "as"
<< "as"
- << "pipeline"
- << BSONArray()))),
+ << "pipeline" << BSONArray()))),
emptyCollation));
}
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index 7ecc1544e31..def5d50154d 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -110,8 +110,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "Operation would result in a resolved view pipeline that exceeds "
"the maximum size of "
- << kMaxViewPipelineSizeBytes
- << " bytes"};
+ << kMaxViewPipelineSizeBytes << " bytes"};
}
guard.dismiss();
@@ -217,8 +216,7 @@ Status ViewGraph::_validateParents(uint64_t currentId, int currentDepth, StatsMa
if (size > kMaxViewPipelineSizeBytes) {
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "View pipeline is too large and exceeds the maximum size of "
- << ViewGraph::kMaxViewPipelineSizeBytes
- << " bytes"};
+ << ViewGraph::kMaxViewPipelineSizeBytes << " bytes"};
}
return Status::OK();
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index a2cc613282d..ca87ea7d50c 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -51,9 +51,9 @@
namespace mongo {

-using std::string;
using repl::OpTime;
using repl::OpTimeAndWallTime;
+using std::string;

static TimerStats gleWtimeStats;
static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime", &gleWtimeStats);
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index c6a53840094..c3160ef54bd 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -42,14 +42,14 @@
namespace BasicTests {

-using std::unique_ptr;
-using std::shared_ptr;
using std::cout;
using std::dec;
using std::endl;
using std::hex;
+using std::shared_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;

class RarelyTest {
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 1c71b2b8e84..bdb97bed3df 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -40,8 +40,8 @@
namespace ClientTests {

-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;

class Base {
@@ -407,4 +407,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace ClientTests
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index 8be8065bf54..f8822b0a315 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -159,7 +159,7 @@ struct Type2 : Base {
ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result["md5"].valuestr());
}
};
-}
+} // namespace FileMD5
namespace SymbolArgument {
// SERVER-16260
@@ -318,12 +318,10 @@ public:
cmd.append("indexes",
BSON_ARRAY(BSON("key" << BSON("loc"
<< "geoHaystack"
- << "z"
- << 1.0)
+ << "z" << 1.0)
<< "name"
<< "loc_geoHaystack_z_1"
- << "bucketSize"
- << static_cast<double>(0.7))));
+ << "bucketSize" << static_cast<double>(0.7))));
BSONObj result;
ASSERT(db.runCommand(nsDb(), cmd.obj(), result));
@@ -403,4 +401,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace CommandTests
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 36a59a07d90..9f820418793 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -58,12 +58,9 @@ public:
_collection = _database->createCollection(&_opCtx, nss());
IndexCatalog* indexCatalog = _collection->getIndexCatalog();
- auto indexSpec =
- BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns" << ns()
- << "key"
- << BSON("a" << 1)
- << "name"
- << "a_1");
+ auto indexSpec = BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns() << "key" << BSON("a" << 1) << "name"
+ << "a_1");
uassertStatusOK(indexCatalog->createIndexOnEmptyCollection(&_opCtx, indexSpec));
wunit.commit();
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 1a3f889eb8e..427faa513de 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -45,8 +45,8 @@ namespace mongo {
namespace {

-using std::unique_ptr;
using std::set;
+using std::unique_ptr;

/**
* Unit tests related to DBHelpers
diff --git a/src/mongo/dbtests/deferred_writer.cpp b/src/mongo/dbtests/deferred_writer.cpp
index fe3122e5b54..da51278c392 100644
--- a/src/mongo/dbtests/deferred_writer.cpp
+++ b/src/mongo/dbtests/deferred_writer.cpp
@@ -33,7 +33,6 @@
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/client.h"
-#include "mongo/db/client.h"
#include "mongo/db/concurrency/deferred_writer.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -57,7 +56,7 @@ struct BSONObjCompare {
return SimpleBSONObjComparator::kInstance.compare(lhs, rhs) < 0;
}
};
-}
+} // namespace
static const NamespaceString kTestNamespace("unittests", "deferred_writer_tests");
@@ -384,4 +383,4 @@ public:
add<DeferredWriterTestAsync>();
}
} deferredWriterTests;
-}
+} // namespace deferred_writer_tests
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index a87f38463cf..6ca684003d9 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -77,10 +77,7 @@ public:
BSONObj info;
BSONObj cmd = BSON("captrunc"
<< "b"
- << "n"
- << 1
- << "inc"
- << true);
+ << "n" << 1 << "inc" << true);
// cout << cmd.toString() << endl;
bool ok = client.runCommand("a", cmd, info);
// cout << info.toString() << endl;
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
index a7a0f57090d..8ed12ba9faf 100644
--- a/src/mongo/dbtests/framework.h
+++ b/src/mongo/dbtests/framework.h
@@ -37,5 +37,5 @@
namespace mongo {
namespace dbtests {
int runDbTests(int argc, char** argv);
-} // dbtests
+} // namespace dbtests
} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index ea4f54b65d8..e24c9dd9898 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -138,4 +138,4 @@ Status storeTestFrameworkOptions(const moe::Environment& params,
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.h b/src/mongo/dbtests/framework_options.h
index 602bef0b35c..b79b4eca905 100644
--- a/src/mongo/dbtests/framework_options.h
+++ b/src/mongo/dbtests/framework_options.h
@@ -68,4 +68,4 @@ bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
Status storeTestFrameworkOptions(const moe::Environment& params,
const std::vector<std::string>& args);
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp
index 5fb68b995c0..9ecf7993499 100644
--- a/src/mongo/dbtests/framework_options_init.cpp
+++ b/src/mongo/dbtests/framework_options_init.cpp
@@ -67,4 +67,4 @@ MONGO_INITIALIZER_GENERAL(CoreOptions_Store, MONGO_NO_PREREQUISITES, MONGO_NO_DE
(InitializerContext* context) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 512a3e4b087..9ac94209601 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -147,9 +147,7 @@ public:
&opCtx,
_nss.ns(),
BSON("name" << indexName << "ns" << _nss.ns() << "key" << BSON("x" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "expireAfterSeconds"
- << 5)));
+ << static_cast<int>(kIndexVersion) << "expireAfterSeconds" << 5)));
const IndexDescriptor* desc = _catalog->findIndexByName(&opCtx, indexName);
ASSERT(desc);
@@ -194,4 +192,4 @@ public:
};
SuiteInstance<IndexCatalogTests> indexCatalogTests;
-}
+} // namespace IndexCatalogTests
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index bd479539a13..63ed34e3871 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -134,16 +134,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -189,16 +182,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -242,8 +228,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
<< "a_1"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
// The call is interrupted because mayInterrupt == true.
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
@@ -286,8 +271,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
<< "_id_"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
@@ -333,11 +317,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -350,13 +330,8 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "unique"
- << true
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "unique" << true << "key"
+ << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -367,11 +342,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -384,11 +355,7 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("y" << 1 << "x" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -402,19 +369,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -428,19 +387,11 @@ public:
createIndex("unittests",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -452,19 +403,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -478,44 +421,27 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << false
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << false << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
class SameSpecDifferentSparse : public ComplexIndex {
public:
void run() {
- ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
- << "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << false
- << "background"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
- << BSON("superIdx"
- << "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ ASSERT_EQUALS(
+ ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << 1 << "sparse" << false << "background"
+ << true << "expireAfterSeconds" << 3600 << "key"
+ << BSON("superIdx"
+ << "2d")
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -526,19 +452,11 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 2400
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 2400 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -585,14 +503,8 @@ protected:
BSONObj _createSpec(T storageEngineValue) {
return BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "storageEngine"
- << storageEngineValue);
+ << "ns" << _ns << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "storageEngine" << storageEngineValue);
}
};
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index aa4db803b11..56a58e6e852 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -1163,18 +1163,13 @@ class LabelShares : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a"
- << (BSON("$gt" << 1))
- << "x"
+ << "a" << (BSON("$gt" << 1)) << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << "x"
+ << "a" << GT << 1 << "x"
<< "p");
}
};
@@ -1202,11 +1197,7 @@ class LabelDoubleShares : public LabelBase {
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
+ << "a" << GT << 1 << LTE << "x"
<< "x"
<< "p");
}
@@ -1231,27 +1222,15 @@ class LabelMulti : public LabelBase {
<< "b"
<< BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3)
+ << "$ne" << 22.3)
<< "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
- << "b"
- << NE
- << 1
- << NE
- << "f"
- << NE
- << 22.3
- << "x"
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3 << "x"
<< "p");
}
};
@@ -1261,8 +1240,7 @@ class LabelishOr : public LabelBase {
<< "x"))
<< BSON("b" << BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3))
+ << "$ne" << 22.3))
<< BSON("x"
<< "p")));
}
@@ -1614,9 +1592,7 @@ struct BSONArrayBuilderTest {
BSONObjBuilder objb;
BSONArrayBuilder arrb;
- auto fieldNameGenerator = [i = 0]() mutable {
- return std::to_string(i++);
- };
+ auto fieldNameGenerator = [i = 0]() mutable { return std::to_string(i++); };
objb << fieldNameGenerator() << 100;
arrb << 100;
@@ -1630,8 +1606,9 @@ struct BSONArrayBuilderTest {
objb << fieldNameGenerator() << string("World");
arrb << string("World");
- objb << fieldNameGenerator() << BSON("a" << 1 << "b"
- << "foo");
+ objb << fieldNameGenerator()
+ << BSON("a" << 1 << "b"
+ << "foo");
arrb << BSON("a" << 1 << "b"
<< "foo");
@@ -1685,14 +1662,13 @@ struct BSONArrayBuilderTest {
struct ArrayMacroTest {
void run() {
- BSONArray arr = BSON_ARRAY("hello" << 1 << BSON("foo" << BSON_ARRAY("bar"
- << "baz"
- << "qux")));
+ BSONArray arr = BSON_ARRAY("hello" << 1
+ << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
BSONObj obj = BSON("0"
<< "hello"
- << "1"
- << 1
- << "2"
+ << "1" << 1 << "2"
<< BSON("foo" << BSON_ARRAY("bar"
<< "baz"
<< "qux")));
@@ -1799,38 +1775,26 @@ public:
// DBRef stuff -- json parser can't handle this yet
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1)));
+ << "$id" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a")));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "stuff"
- << 1)));
+ << "$id" << 1 << "stuff" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a"
- << "stuff"
- << 1)));
+ << "stuff" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db"
<< "a")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
- << 1)));
+ << "$id" << 1 << "$db" << 1)));
bad(BSON("a" << BSON("$ref"
<< "coll")));
bad(BSON("a" << BSON("$ref"
@@ -1842,10 +1806,7 @@ public:
<< "coll")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$hater"
- << 1)));
+ << "$id" << 1 << "$hater" << 1)));
}
};
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index c70457b73e8..69476f19b77 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -939,7 +939,8 @@ TEST(FromJsonTest, NumericTypes) {
double d;
};
const Val vals[] = {
- {123, kMaxS64, 3.14}, {-123, -kMaxS64, -3.14},
+ {123, kMaxS64, 3.14},
+ {-123, -kMaxS64, -3.14},
};
for (const Val& val : vals) {
const BSONObj obj =
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 051bc478dbc..44ccf7b6b5a 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -411,8 +411,7 @@ public:
<< "eliot"
<< "z"
<< "sara"
- << "zz"
- << BSONObj());
+ << "zz" << BSONObj());
s->setObject("blah", o, true);
BSONObj out;
@@ -1239,7 +1238,22 @@ class NovelNaN {
public:
void run() {
uint8_t bits[] = {
- 16, 0, 0, 0, 0x01, 'a', '\0', 0x61, 0x79, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 16,
+ 0,
+ 0,
+ 0,
+ 0x01,
+ 'a',
+ '\0',
+ 0x61,
+ 0x79,
+ 0xfe,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0,
};
unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index 41ca3fb7a14..59abe29a460 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -182,4 +182,4 @@ void MockDBClientConnection::checkConnection() {
_remoteServerInstanceID = _remoteServer->getInstanceID();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index aaa4968d58e..afe818fb4ae 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -134,4 +134,4 @@ private:
uint64_t _sockCreationTime;
bool _autoReconnect;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index f1253cc7fd9..6488e5023ff 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -239,4 +239,4 @@ void MockRemoteDBServer::checkIfUp(InstanceID id) const {
throwSocketError(mongo::SocketErrorKind::CLOSED, _hostAndPort);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp
index fb17ad1a960..a028f8a0530 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.cpp
+++ b/src/mongo/dbtests/mock/mock_replica_set.cpp
@@ -350,4 +350,4 @@ void MockReplicaSet::mockReplSetGetStatusCmd() {
node->setCommandReply("replSetGetStatus", fullStatBuilder.done());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h
index a2f442d8beb..01929b0e203 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.h
+++ b/src/mongo/dbtests/mock/mock_replica_set.h
@@ -150,4 +150,4 @@ private:
std::string _primaryHost;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 555f982002b..236b80d45d4 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -414,16 +414,10 @@ TEST(MockDBClientConnTest, CyclingCmd) {
vector<BSONObj> isMasterSequence;
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << true
- << "ok"
- << 1));
+ << "isMaster" << true << "ok" << 1));
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << false
- << "ok"
- << 1));
+ << "isMaster" << false << "ok" << 1));
server.setCommandReply("isMaster", isMasterSequence);
}
@@ -630,4 +624,4 @@ TEST(MockDBClientConnTest, Delay) {
ASSERT_EQUALS(1U, server.getQueryCount());
ASSERT_EQUALS(1U, server.getCmdCount());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp
index 18f3a93a444..398c6dfd85d 100644
--- a/src/mongo/dbtests/mock_replica_set_test.cpp
+++ b/src/mongo/dbtests/mock_replica_set_test.cpp
@@ -424,4 +424,4 @@ TEST(MockReplicaSetTest, KillMultipleNode) {
const string priHostName(replSet.getPrimary());
ASSERT(replSet.getNode(priHostName)->isRunning());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index d12ce069a54..8f0759f69b1 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -96,8 +96,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -143,11 +142,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -177,11 +172,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -197,11 +188,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -239,11 +226,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -292,11 +275,7 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -337,11 +316,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAB
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAB << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -349,11 +324,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_c_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAC
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAC << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
{
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 48e9a7adfb1..61eedeab72f 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -354,8 +354,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
@@ -381,8 +380,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase)
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index a9fe6623848..1433e8a2558 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -715,4 +715,4 @@ public:
SuiteInstance<All> planRankingAll;
-} // namespace PlanRankingTest
+} // namespace PlanRankingTests
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 49f6d6429cc..29677d86e8c 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -65,7 +65,7 @@ std::unique_ptr<CanonicalQuery> canonicalQueryFromFilterObj(OperationContext* op
uassertStatusOK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
-}
+} // namespace
class QueryStageCachedPlan : public unittest::Test {
public:
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 003beb748d9..87e66a58918 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -75,10 +75,7 @@ public:
->createIndexOnEmptyCollection(&_opCtx,
BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << ns()
- << "v"
- << 1))
+ << "ns" << ns() << "v" << 1))
.status_with_transitional_ignore();
for (int i = 0; i < kDocuments; i++) {
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index d38c26eb009..3217ebd979b 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -62,8 +62,7 @@ public:
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
&_opCtx,
BSON("ns" << ns() << "key" << BSON("x" << 1) << "name"
- << DBClientBase::genIndexName(BSON("x" << 1))
- << "v"
+ << DBClientBase::genIndexName(BSON("x" << 1)) << "v"
<< static_cast<int>(kIndexVersion))));
wunit.commit();
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 5656174f7b3..e4e564aef78 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -54,10 +54,10 @@
namespace QueryStageMergeSortTests {
+using std::make_unique;
using std::set;
using std::string;
using std::unique_ptr;
-using std::make_unique;
class QueryStageMergeSortTestBase {
public:
@@ -884,4 +884,4 @@ public:
SuiteInstance<All> queryStageMergeSortTest;
-} // namespace
+} // namespace QueryStageMergeSortTests
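Several hunks, like the one above, only reorder using-declarations. The new order is plain lexicographic on the fully qualified name — the rule clang-format's SortUsingDeclarations applies, assuming that option is what drove this patch — which is why make_unique now sorts before set and string, and why `mongo::unittest::assertGet` sorts ahead of everything in `std` later in the series. A compilable sketch of the rule:

    #include <memory>
    #include <set>
    #include <string>

    // Sorted by qualified name: within std, 'm' < 's' < 'u',
    // and 'set' < 'string' because 'e' < 't'.
    using std::make_unique;
    using std::set;
    using std::string;
    using std::unique_ptr;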
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index df400c3bd1f..40f5ba3a5c5 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -234,4 +234,4 @@ TEST_F(QueryStageNearTest, EmptyResults) {
ASSERT_EQUALS(results.size(), 3u);
assertAscendingAndValid(results);
}
-}
+} // namespace
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 1982273bc79..5b855933793 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -598,4 +598,4 @@ public:
SuiteInstance<All> queryStageSortTest;
-} // namespace
+} // namespace QueryStageSortTests
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index c369ce03b7f..be533ba7142 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -120,8 +120,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
dbtests::WriteContextForTests ctx(opCtx(), nss.ns());
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
addIndex(BSON("a"
<< "2d"));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index ffa55f0040b..f9178555ce2 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -242,4 +242,4 @@ public:
SuiteInstance<All> queryStageTestsAll;
-} // namespace
+} // namespace QueryStageTests
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index adafcaadccd..c6c67923ae5 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -66,9 +66,9 @@
namespace QueryStageUpdate {
+using std::make_unique;
using std::unique_ptr;
using std::vector;
-using std::make_unique;
static const NamespaceString nss("unittests.QueryStageUpdate");
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 0114a3563d5..6e74f6c1571 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -57,9 +57,9 @@
namespace {
namespace QueryTests {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
class Base {
@@ -234,8 +234,7 @@ public:
bool ok = cl.runCommand("unittests",
BSON("godinsert"
<< "querytests"
- << "obj"
- << BSONObj()),
+ << "obj" << BSONObj()),
info);
ASSERT(ok);
@@ -650,12 +649,7 @@ public:
_client.runCommand("unittests",
BSON("create"
<< "querytests.TailableQueryOnId"
- << "capped"
- << true
- << "size"
- << 8192
- << "autoIndexId"
- << true),
+ << "capped" << true << "size" << 8192 << "autoIndexId" << true),
info);
insertA(ns, 0);
insertA(ns, 1);
@@ -1537,12 +1531,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1611,12 +1600,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1690,12 +1674,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1792,10 +1771,7 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.exhaust"
- << "capped"
- << true
- << "size"
- << 8192),
+ << "capped" << true << "size" << 8192),
info));
_client.insert(ns(), BSON("ts" << Timestamp(1000, 0)));
Message message;
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 4477d727839..81472049c19 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -45,10 +45,10 @@ namespace mongo {
namespace {
using std::map;
-using std::vector;
using std::set;
using std::string;
using std::unique_ptr;
+using std::vector;
using unittest::assertGet;
MONGO_INITIALIZER(DisableReplicaSetMonitorRefreshRetries)(InitializerContext*) {
@@ -216,22 +216,24 @@ protected:
const string host(_replSet->getPrimary());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "1")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "1")));
}
{
const string host(_replSet->getSecondaries().front());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "2")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "2")));
}
membersBuilder.done();
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index cc19591c47e..8a7edd7d7b2 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -55,10 +55,10 @@ using namespace mongo::repl;
namespace ReplTests {
-using std::unique_ptr;
using std::endl;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
/**
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 4dfcfee6c66..317be739e89 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -42,10 +42,10 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
-using std::unique_ptr;
+using mongo::unittest::assertGet;
using std::list;
using std::string;
-using mongo::unittest::assertGet;
+using std::unique_ptr;
namespace RollbackTests {
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 4a0d996cd55..a42955f4646 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -124,7 +124,7 @@ public:
private:
OperationContext* _opCtx;
};
-}
+} // namespace
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
@@ -258,12 +258,12 @@ public:
BSONObj indexInfoObj;
{
- auto swIndexInfoObj = indexer.init(
- _opCtx,
- coll,
- {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns() << "key"
- << indexKey)},
- MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
+ auto swIndexInfoObj =
+ indexer.init(_opCtx,
+ coll,
+ {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns()
+ << "key" << indexKey)},
+ MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
}
@@ -389,11 +389,11 @@ public:
const BSONObj& expectedDoc) {
OneOffRead oor(_opCtx, ts);
if (expectedDoc.isEmpty()) {
- ASSERT_EQ(0, itCount(coll)) << "Should not find any documents in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(0, itCount(coll))
+ << "Should not find any documents in " << coll->ns() << " at ts: " << ts;
} else {
- ASSERT_EQ(1, itCount(coll)) << "Should find one document in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(1, itCount(coll))
+ << "Should find one document in " << coll->ns() << " at ts: " << ts;
auto doc = findOne(coll);
ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, expectedDoc))
<< "Doc: " << doc.toString() << " Expected: " << expectedDoc.toString();
@@ -670,8 +670,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -706,23 +705,16 @@ public:
nss.db().toString(),
BSON("applyOps" << BSON_ARRAY(
BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << idx))
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << idx))
<< BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
<< "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("applyOps" << BSONArrayBuilder().obj())))),
+ << "o" << BSON("applyOps" << BSONArrayBuilder().obj())))),
repl::OplogApplication::Mode::kApplyOpsCmd,
&result));
}
@@ -824,20 +816,14 @@ public:
// Delete all documents one at a time.
const LogicalTime startDeleteTime = _clock->reserveTicks(docsToInsert);
for (std::int32_t num = 0; num < docsToInsert; ++num) {
- ASSERT_OK(
- doNonAtomicApplyOps(
- nss.db().toString(),
- {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
- << "d"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << num))})
- .getStatus());
+ ASSERT_OK(doNonAtomicApplyOps(
+ nss.db().toString(),
+ {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL
+ << "v" << 2 << "op"
+ << "d"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << num))})
+ .getStatus());
}
for (std::int32_t num = 0; num <= docsToInsert; ++num) {
@@ -889,22 +875,14 @@ public:
const LogicalTime firstUpdateTime = _clock->reserveTicks(updates.size());
for (std::size_t idx = 0; idx < updates.size(); ++idx) {
- ASSERT_OK(
- doNonAtomicApplyOps(
- nss.db().toString(),
- {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
- << "u"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o2"
- << BSON("_id" << 0)
- << "o"
- << updates[idx].first)})
- .getStatus());
+ ASSERT_OK(doNonAtomicApplyOps(
+ nss.db().toString(),
+ {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL
+ << "v" << 2 << "op"
+ << "u"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o2" << BSON("_id" << 0) << "o" << updates[idx].first)})
+ .getStatus());
}
for (std::size_t idx = 0; idx < updates.size(); ++idx) {
@@ -941,19 +919,11 @@ public:
nss.db().toString(),
{BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() << "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() << "o"
<< BSON("_id" << 0))}));
ASSERT_EQ(2, result.getIntField("applied"));
@@ -992,23 +962,16 @@ public:
// Reserve a timestamp before the inserts should happen.
const LogicalTime preInsertTimestamp = _clock->reserveTicks(1);
- auto swResult = doAtomicApplyOps(nss.db().toString(),
- {BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0)),
- BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 1))});
+ auto swResult =
+ doAtomicApplyOps(nss.db().toString(),
+ {BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0)),
+ BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 1))});
ASSERT_OK(swResult);
ASSERT_EQ(2, swResult.getValue().getIntField("applied"));
@@ -1051,23 +1014,16 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
const LogicalTime preInsertTimestamp = _clock->reserveTicks(1);
- auto swResult = doAtomicApplyOps(nss.db().toString(),
- {BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0 << "field" << 0)),
- BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0))});
+ auto swResult =
+ doAtomicApplyOps(nss.db().toString(),
+ {BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0 << "field" << 0)),
+ BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0))});
ASSERT_OK(swResult);
ASSERT_EQ(2, swResult.getValue().getIntField("applied"));
@@ -1105,17 +1061,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1146,25 +1099,18 @@ public:
const Timestamp dummyTs = dummyLt.asTimestamp();
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss1.getCommandNS().ns()
- << "o"
- << BSON("create" << nss1.coll())),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss1.getCommandNS().ns() << "o"
+ << BSON("create" << nss1.coll())),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss1).getCollection()); }
@@ -1212,33 +1158,21 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss2).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss1.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << doc1),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << uuid2
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- BSON("ts" << insert2Ts << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss2.ns()
- << "ui"
- << uuid2
- << "o"
- << doc2),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss1.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << doc1),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << uuid2 << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ BSON("ts" << insert2Ts << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss2.ns() << "ui" << uuid2 << "o" << doc2),
+ });
ASSERT_OK(swResult);
}
@@ -1283,17 +1217,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1331,9 +1262,8 @@ public:
uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1349,30 +1279,15 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
std::vector<repl::OplogEntry> ops = {op0, op1, op2};
DoNothingOplogApplierObserver observer;
@@ -1416,9 +1331,8 @@ public:
uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1436,45 +1350,23 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
auto indexSpec2 = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("b" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("b" << 1) << "name"
<< "b_1");
auto createIndexOp = repl::OplogEntry(
BSON("ts" << indexBuildTime.asTimestamp() << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << uuid
- << "o"
- << indexSpec2));
+ << "ns" << nss.getCommandNS().ns() << "ui" << uuid << "o" << indexSpec2));
// We add in an index creation op to test that we restart tracking multikey path info
// after bulk index builds.
@@ -1535,9 +1427,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1565,9 +1456,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1598,9 +1488,8 @@ public:
reset(nss);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
auto doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
{
@@ -1990,10 +1879,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2101,10 +1987,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2259,8 +2142,7 @@ public:
const Timestamp indexAComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << nss.coll()
+ << "o.createIndexes" << nss.coll()
<< "o.name"
<< "a_1"))["ts"]
.timestamp();
@@ -2350,9 +2232,9 @@ public:
BSON("renameCollection" << nss.ns() << "to" << renamedNss.ns() << "dropTarget" << true),
renameResult);
- const auto createIndexesDocument = queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
- << "o.createIndexes"
- << BSON("$exists" << true)));
+ const auto createIndexesDocument =
+ queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
+ << "o.createIndexes" << BSON("$exists" << true)));
// Find index creation timestamps.
const auto createIndexesString =
@@ -2365,15 +2247,13 @@ public:
const Timestamp indexCreateInitTs = queryOplog(BSON("op"
<< "c"
- << "o.create"
- << tmpName.coll()))["ts"]
+ << "o.create" << tmpName.coll()))["ts"]
.timestamp();
const Timestamp indexAComplete = createIndexesDocument["ts"].timestamp();
const Timestamp indexBComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << tmpName.coll()
+ << "o.createIndexes" << tmpName.coll()
<< "o.name"
<< "b_1"))["ts"]
.timestamp();
@@ -2552,14 +2432,10 @@ public:
// Make a simple insert operation.
BSONObj doc0 = BSON("_id" << 0 << "a" << 0);
- auto insertOp = repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
- << "i"
- << "ns"
- << ns.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ auto insertOp =
+ repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
+ << "i"
+ << "ns" << ns.ns() << "ui" << uuid << "o" << doc0));
// Apply the operation.
auto storageInterface = repl::StorageInterface::get(_opCtx);
@@ -2642,20 +2518,14 @@ public:
}
auto indexSpec = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("field" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("field" << 1) << "name"
<< "field_1");
auto createIndexOp = BSON("ts" << startBuildTs << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << collUUID
- << "o"
- << indexSpec);
+ << "ns" << nss.getCommandNS().ns() << "ui" << collUUID
+ << "o" << indexSpec);
ASSERT_OK(doAtomicApplyOps(nss.db().toString(), {createIndexOp}));
@@ -2690,21 +2560,17 @@ public:
ASSERT_OK(createCollection(_opCtx,
viewNss.db().toString(),
BSON("create" << viewNss.coll() << "pipeline" << BSONArray()
- << "viewOn"
- << backingCollNss.coll())));
+ << "viewOn" << backingCollNss.coll())));
const Timestamp systemViewsCreateTs = queryOplog(BSON("op"
<< "c"
- << "ns"
- << (viewNss.db() + ".$cmd")
+ << "ns" << (viewNss.db() + ".$cmd")
<< "o.create"
<< "system.views"))["ts"]
.timestamp();
const Timestamp viewCreateTs = queryOplog(BSON("op"
<< "i"
- << "ns"
- << systemViewsNss.ns()
- << "o._id"
+ << "ns" << systemViewsNss.ns() << "o._id"
<< viewNss.ns()))["ts"]
.timestamp();
@@ -2721,11 +2587,11 @@ public:
AutoGetCollection autoColl(_opCtx, systemViewsNss, LockMode::MODE_IS);
assertDocumentAtTimestamp(autoColl.getCollection(), systemViewsCreateTs, BSONObj());
- assertDocumentAtTimestamp(
- autoColl.getCollection(),
- viewCreateTs,
- BSON("_id" << viewNss.ns() << "viewOn" << backingCollNss.coll() << "pipeline"
- << BSONArray()));
+ assertDocumentAtTimestamp(autoColl.getCollection(),
+ viewCreateTs,
+ BSON("_id" << viewNss.ns() << "viewOn"
+ << backingCollNss.coll() << "pipeline"
+ << BSONArray()));
}
}
};
@@ -2752,9 +2618,7 @@ public:
BSONObj result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.create"
+ << "ns" << nss.getCommandNS().ns() << "o.create"
<< nss.coll()));
repl::OplogEntry op(result);
// The logOp() call for createCollection should have timestamp 'futureTs', which will also
@@ -2770,9 +2634,7 @@ public:
result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.createIndexes"
+ << "ns" << nss.getCommandNS().ns() << "o.createIndexes"
<< nss.coll()));
repl::OplogEntry indexOp(result);
ASSERT_EQ(indexOp.getObject()["name"].str(), "user_1_db_1");
@@ -2976,17 +2838,13 @@ public:
assertFilteredDocumentAtTimestamp(coll, query2, nullTs, doc2);
// Implicit commit oplog entry should exist at commitEntryTs.
- const auto commitFilter = BSON(
- "ts" << commitEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc2))
- << "count"
- << 2));
+ const auto commitFilter =
+ BSON("ts" << commitEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc2))
+ << "count" << 2));
assertOplogDocumentExistsAtTimestamp(commitFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, firstOplogEntryTs, false);
@@ -3006,14 +2864,9 @@ public:
BSON("ts" << firstOplogEntryTs << "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3185,14 +3038,9 @@ public:
BSON("ts" << firstOplogEntryTs << "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3200,19 +3048,13 @@ public:
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, commitEntryTs, true);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, nullTs, true);
// The prepare oplog entry should exist at prepareEntryTs and onwards.
- const auto prepareOplogEntryFilter = BSON(
- "ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc2))
- << "prepare"
- << true
- << "count"
- << 2));
+ const auto prepareOplogEntryFilter =
+ BSON("ts" << prepareEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc2))
+ << "prepare" << true << "count" << 2));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, firstOplogEntryTs, false);
@@ -3325,17 +3167,13 @@ public:
}
// The prepare oplog entry should exist at firstOplogEntryTs and onwards.
- const auto prepareOplogEntryFilter =
- BSON("ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << ui
- << "o"
- << doc))
- << "prepare"
- << true));
+ const auto prepareOplogEntryFilter = BSON(
+ "ts" << prepareEntryTs << "o"
+ << BSON("applyOps"
+ << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << ui << "o" << doc))
+ << "prepare" << true));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, prepareEntryTs, true);
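Many of the storage-timestamp hunks reflow assertion diagnostics so the whole `<<` chain follows the assertion on continuation lines rather than being split after the macro's arguments. A minimal sketch of the pattern, relying on the streaming support of the mongo unittest macros that the tests above already use (the helper and its names are illustrative):

    #include <string>

    #include "mongo/unittest/unittest.h"

    // Hypothetical helper: fail with a readable message when a count is off.
    void assertDocCount(int expected, int actual, const std::string& ns) {
        ASSERT_EQ(expected, actual)
            << "Should find " << expected << " document(s) in " << ns;
    }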
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 1dd468576eb..92d741bbb92 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -48,10 +48,10 @@
namespace ThreadedTests {
-using std::unique_ptr;
using std::cout;
using std::endl;
using std::string;
+using std::unique_ptr;
template <int nthreads_param = 10>
class ThreadedTest {
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 106960fb6c2..62bed088466 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -47,10 +47,10 @@
namespace UpdateTests {
-using std::unique_ptr;
using std::numeric_limits;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
@@ -1665,8 +1665,8 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:3}]}"));
// { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } }
- BSONObj pushObj = BSON(
- "$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$slice" << -2.0);
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$slice" << -2.0);
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
@@ -1680,9 +1680,8 @@ public:
BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:3}]}");
_client.insert(ns(), expected);
// { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
- BSONObj pushObj =
- BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$sort"
- << BSON("a" << 1));
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$sort" << BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
ASSERT_BSONOBJ_EQ(result, expected);
@@ -1763,9 +1762,7 @@ public:
ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
- << BSONObj()
- << "z"
- << 5),
+ << BSONObj() << "z" << 5),
_client.findOne(ns(), BSONObj()));
}
};
@@ -1779,9 +1776,7 @@ public:
_client.update(
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
- << BSONObj()
- << "x"
- << BSONObj()),
+ << BSONObj() << "x" << BSONObj()),
_client.findOne(ns(), BSONObj()));
}
};
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 925d0a88b9a..39f48384421 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -229,18 +229,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -305,18 +301,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -467,14 +459,10 @@ public:
coll->ns().ns(),
BSON("name"
<< "multikey_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a.b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a.b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -541,20 +529,14 @@ public:
}
// Create a sparse index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "sparse_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "sparse"
- << true));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "sparse_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "sparse" << true));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -614,20 +596,15 @@ public:
}
// Create a partial index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$gt" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$gt" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -680,38 +657,30 @@ public:
}
// Create a partial geo index that indexes the document. This should return an error.
- ASSERT_NOT_OK(dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 2)))));
+ ASSERT_NOT_OK(
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 2)))));
// Create a partial geo index that does not index the document.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
releaseDb();
@@ -766,28 +735,20 @@ public:
coll->ns().ns(),
BSON("name"
<< "compound_index_1"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1 << "b" << -1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << 1 << "b" << -1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "compound_index_2"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << -1 << "b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << -1 << "b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -846,9 +807,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -930,9 +889,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -979,9 +936,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert non-multikey documents.
@@ -1090,9 +1045,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents with indexed and not-indexed paths.
@@ -1183,9 +1136,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1272,9 +1223,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1380,9 +1329,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index 54c95abe186..cfde086709d 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -110,9 +110,7 @@ void setUpCatalog(ServiceContext* serviceContext) {
// Create a minimalistic replication coordinator to provide a limited interface for users. Not
// functional to provide any replication logic.
ServiceContext::ConstructorActionRegisterer replicationManagerInitializer(
- "CreateReplicationManager",
- {"SSLManager", "default"},
- [](ServiceContext* serviceContext) {
+ "CreateReplicationManager", {"SSLManager", "default"}, [](ServiceContext* serviceContext) {
repl::StorageInterface::set(serviceContext, std::make_unique<repl::StorageInterfaceImpl>());
auto logicalClock = std::make_unique<LogicalClock>(serviceContext);
diff --git a/src/mongo/embedded/embedded_ismaster.cpp b/src/mongo/embedded/embedded_ismaster.cpp
index 20b9b4d8c83..e42c4292dac 100644
--- a/src/mongo/embedded/embedded_ismaster.cpp
+++ b/src/mongo/embedded/embedded_ismaster.cpp
@@ -110,5 +110,5 @@ public:
}
} CmdIsMaster;
-} // namespace repl
+} // namespace
} // namespace mongo
diff --git a/src/mongo/embedded/embedded_options_helpers.cpp b/src/mongo/embedded/embedded_options_helpers.cpp
index 7924db33f18..6815584a9d0 100644
--- a/src/mongo/embedded/embedded_options_helpers.cpp
+++ b/src/mongo/embedded/embedded_options_helpers.cpp
@@ -60,4 +60,4 @@ Status parseCommandLineOptions(int argc,
}
} // namespace embedded_integration_helpers
-} // namepsace mongo
+} // namespace mongo
diff --git a/src/mongo/embedded/stitch_support/stitch_support_test.cpp b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
index e73ef6627e8..e5e552260b5 100644
--- a/src/mongo/embedded/stitch_support/stitch_support_test.cpp
+++ b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
@@ -402,7 +402,7 @@ TEST_F(StitchSupportTest, CheckMatchWorksWithCollation) {
}
TEST_F(StitchSupportTest, CheckProjectionWorksWithDefaults) {
- auto[results, needsMatch] =
+ auto [results, needsMatch] =
checkProjection("{a: 1}", {"{_id: 1, a: 100, b: 200}", "{_id: 1, a: 200, b: 300}"});
ASSERT_FALSE(needsMatch);
ASSERT_EQ("{ \"_id\" : 1, \"a\" : 100 }", results[0]);
@@ -443,7 +443,7 @@ TEST_F(StitchSupportTest, CheckProjectionCollatesRespectfully) {
lib, toBSONForAPI("{locale: 'en', strength: 2}").first, nullptr);
ON_BLOCK_EXIT([collator] { stitch_support_v1_collator_destroy(collator); });
- auto[results, needsMatch] =
+ auto [results, needsMatch] =
checkProjection("{a: {$elemMatch: {$eq: 'MiXedcAse'}}}",
{"{_id: 1, a: ['lowercase', 'mixEdCaSe', 'UPPERCASE']}"},
nullptr,
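The `auto[results, needsMatch]` to `auto [results, needsMatch]` hunks above are purely whitespace: the upgraded clang-format separates `auto` from a structured-binding list. A standard-C++ sketch of the construct itself, with an illustrative stand-in for the test helper:

    #include <string>
    #include <utility>

    std::pair<std::string, bool> checkProjectionStub() {  // illustrative stand-in
        return {"{ \"_id\" : 1, \"a\" : 100 }", false};
    }

    void demo() {
        // One declaration unpacks both members of the returned pair.
        auto [results, needsMatch] = checkProjectionStub();
        (void)results;     // std::string
        (void)needsMatch;  // bool
    }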
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index e425d72ea5f..76077ae0809 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -74,7 +74,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
namespace executor {
@@ -164,7 +164,8 @@ public:
const auto& data = getOrInvariant(_poolData, id);
return {
- getPool()->_options.maxConnecting, data.target,
+ getPool()->_options.maxConnecting,
+ data.target,
};
}
@@ -219,11 +220,11 @@ public:
template <typename Callback>
auto guardCallback(Callback&& cb) {
return
- [ this, cb = std::forward<Callback>(cb), anchor = shared_from_this() ](auto&&... args) {
- stdx::lock_guard lk(_parent->_mutex);
- cb(std::forward<decltype(args)>(args)...);
- updateState();
- };
+ [this, cb = std::forward<Callback>(cb), anchor = shared_from_this()](auto&&... args) {
+ stdx::lock_guard lk(_parent->_mutex);
+ cb(std::forward<decltype(args)>(args)...);
+ updateState();
+ };
}
SpecificPool(std::shared_ptr<ConnectionPool> parent,
@@ -519,7 +520,7 @@ void ConnectionPool::get_forTest(const HostAndPort& hostAndPort,
Milliseconds timeout,
GetConnectionCallback cb) {
// We kick ourselves onto the executor queue to prevent us from deadlocking with our own thread
- auto getConnectionFunc = [ this, hostAndPort, timeout, cb = std::move(cb) ](Status &&) mutable {
+ auto getConnectionFunc = [this, hostAndPort, timeout, cb = std::move(cb)](Status&&) mutable {
get(hostAndPort, transport::kGlobalSSLMode, timeout)
.thenRunOn(_factory->getExecutor())
.getAsync(std::move(cb));
@@ -650,7 +651,7 @@ Future<ConnectionPool::ConnectionHandle> ConnectionPool::SpecificPool::getConnec
}
auto ConnectionPool::SpecificPool::makeHandle(ConnectionInterface* connection) -> ConnectionHandle {
- auto deleter = [ this, anchor = shared_from_this() ](ConnectionInterface * connection) {
+ auto deleter = [this, anchor = shared_from_this()](ConnectionInterface* connection) {
stdx::lock_guard lk(_parent->_mutex);
returnConnection(connection);
_lastActiveTime = _parent->_factory->now();
@@ -1120,7 +1121,7 @@ void ConnectionPool::SpecificPool::updateState() {
}
ExecutorFuture(ExecutorPtr(_parent->_factory->getExecutor())) //
- .getAsync([ this, anchor = shared_from_this() ](Status && status) mutable {
+ .getAsync([this, anchor = shared_from_this()](Status&& status) mutable {
invariant(status);
stdx::lock_guard lk(_parent->_mutex);
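The connection-pool hunks above re-space lambdas with init-captures: the older formatter wrote `[ this, cb = std::move(cb) ]`, the upgraded one `[this, cb = std::move(cb)]`. Behavior is unchanged. A self-contained sketch of the move-a-callback-into-a-lambda pattern (the `schedule` hook is an assumed stand-in, not the pool's API):

    #include <functional>
    #include <utility>

    void schedule(std::function<void()> task);  // assumed executor hook

    void runGuarded(std::function<void()> cb) {
        // The init-capture moves the callback into the closure; 'mutable'
        // lets the body invoke the captured callable.
        schedule([cb = std::move(cb)]() mutable { cb(); });
    }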
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index 16dbffeb56e..96ec25ba64d 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -80,14 +80,14 @@ protected:
template <typename... Args>
auto getFromPool(Args&&... args) {
return ExecutorFuture(_executor)
- .then([ pool = _pool, args... ]() { return pool->get(args...); })
+ .then([pool = _pool, args...]() { return pool->get(args...); })
.semi();
}
void doneWith(ConnectionPool::ConnectionHandle& conn) {
dynamic_cast<ConnectionImpl*>(conn.get())->indicateSuccess();
- ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto){});
+ ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto) {});
}
using StatusWithConn = StatusWith<ConnectionPool::ConnectionHandle>;
diff --git a/src/mongo/executor/connection_pool_test_fixture.cpp b/src/mongo/executor/connection_pool_test_fixture.cpp
index 9b28d752157..5cbfdf8d1d5 100644
--- a/src/mongo/executor/connection_pool_test_fixture.cpp
+++ b/src/mongo/executor/connection_pool_test_fixture.cpp
@@ -74,9 +74,7 @@ void TimerImpl::fireIfNecessary() {
for (auto&& x : timers) {
if (_timers.count(x) && (x->_expiration <= x->now())) {
- auto execCB = [cb = std::move(x->_cb)](auto&&) mutable {
- std::move(cb)();
- };
+ auto execCB = [cb = std::move(x->_cb)](auto&&) mutable { std::move(cb)(); };
auto global = x->_global;
_timers.erase(x);
global->_executor->schedule(std::move(execCB));
@@ -122,7 +120,7 @@ void ConnectionImpl::processSetup() {
_setupQueue.pop_front();
_pushSetupQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_setupCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
@@ -152,7 +150,7 @@ void ConnectionImpl::processRefresh() {
_refreshQueue.pop_front();
_pushRefreshQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_refreshCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index a7ab984b600..e2f7711cca7 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -139,7 +139,7 @@ AsyncDBClient* TLConnection::client() {
void TLConnection::setTimeout(Milliseconds timeout, TimeoutCallback cb) {
auto anchor = shared_from_this();
- _timer->setTimeout(timeout, [ cb = std::move(cb), anchor = std::move(anchor) ] { cb(); });
+ _timer->setTimeout(timeout, [cb = std::move(cb), anchor = std::move(anchor)] { cb(); });
}
void TLConnection::cancelTimeout() {
@@ -213,14 +213,14 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, std::move(status)); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, std::move(status)); });
setTimeout(timeout, [this, handler, timeout] {
if (handler->done.swap(true)) {
return;
}
- std::string reason = str::stream() << "Timed out connecting to " << _peer << " after "
- << timeout;
+ std::string reason = str::stream()
+ << "Timed out connecting to " << _peer << " after " << timeout;
handler->promise.setError(
Status(ErrorCodes::NetworkInterfaceExceededTimeLimit, std::move(reason)));
@@ -286,7 +286,7 @@ void TLConnection::refresh(Milliseconds timeout, RefreshCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, status); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, status); });
setTimeout(timeout, [this, handler] {
if (handler->done.swap(true)) {
@@ -361,4 +361,4 @@ Date_t TLTypeFactory::now() {
} // namespace connection_pool_tl
} // namespace executor
-} // namespace
+} // namespace mongo
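One more recurring reflow, visible in the timeout path above: a `str::stream()` message now breaks after the `=` so the entire `<<` chain lines up on continuation lines. A sketch under the assumption that `mongo/util/str.h` provides the `str::stream` used in the hunk, including its implicit conversion to std::string (the function name and the `ms` suffix are illustrative):

    #include <string>

    #include "mongo/util/str.h"

    std::string timeoutReason(const std::string& peer, long long ms) {
        std::string reason = mongo::str::stream()
            << "Timed out connecting to " << peer << " after " << ms << "ms";
        return reason;
    }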
diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h
index 7a138589055..7297713b92b 100644
--- a/src/mongo/executor/connection_pool_tl.h
+++ b/src/mongo/executor/connection_pool_tl.h
@@ -182,6 +182,6 @@ private:
AsyncDBClient::Handle _client;
};
-} // namespace connection_pool_asio
+} // namespace connection_pool_tl
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/egress_tag_closer.h b/src/mongo/executor/egress_tag_closer.h
index b1ab2430ed6..43d2078f4dd 100644
--- a/src/mongo/executor/egress_tag_closer.h
+++ b/src/mongo/executor/egress_tag_closer.h
@@ -50,9 +50,10 @@ public:
virtual void dropConnections(const HostAndPort& hostAndPort) = 0;
- virtual void mutateTags(const HostAndPort& hostAndPort,
- const std::function<transport::Session::TagMask(
- transport::Session::TagMask)>& mutateFunc) = 0;
+ virtual void mutateTags(
+ const HostAndPort& hostAndPort,
+ const std::function<transport::Session::TagMask(transport::Session::TagMask)>&
+ mutateFunc) = 0;
protected:
EgressTagCloser() {}
diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp
index 163415c923c..2723f66bde7 100644
--- a/src/mongo/executor/network_interface_integration_test.cpp
+++ b/src/mongo/executor/network_interface_integration_test.cpp
@@ -87,8 +87,7 @@ class HangingHook : public executor::NetworkConnectionHook {
"admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 100000000),
+ << "secs" << 100000000),
BSONObj(),
nullptr))};
}
@@ -274,8 +273,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeout) {
auto request = makeTestCommand(Milliseconds{1000});
request.cmdObj = BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 1000000000);
+ << "secs" << 1000000000);
auto deferred = runCommand(cb, request);
waitForIsMaster();
@@ -322,14 +320,15 @@ TEST_F(NetworkInterfaceTest, SetAlarm) {
Date_t expiration = net().now() + Milliseconds(100);
auto makeTimerFuture = [&] {
auto pf = makePromiseFuture<Date_t>();
- return std::make_pair([ this, promise = std::move(pf.promise) ](Status status) mutable {
- if (status.isOK()) {
- promise.emplaceValue(net().now());
- } else {
- promise.setError(status);
- }
- },
- std::move(pf.future));
+ return std::make_pair(
+ [this, promise = std::move(pf.promise)](Status status) mutable {
+ if (status.isOK()) {
+ promise.emplaceValue(net().now());
+ } else {
+ promise.setError(status);
+ }
+ },
+ std::move(pf.future));
};
auto futurePair = makeTimerFuture();
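The SetAlarm test reformatted above builds a (callback, future) pair from one promise: the callback fulfills the promise, and the test waits on the future. A rough standard-library analogue using std::promise in place of MongoDB's internal makePromiseFuture<T>() (assumed equivalent only in ownership shape, not in API):

    #include <chrono>
    #include <future>
    #include <iostream>
    #include <stdexcept>
    #include <utility>

    int main() {
        auto makeTimerFuture = [] {
            std::promise<std::chrono::steady_clock::time_point> promise;
            auto future = promise.get_future();
            // The callback owns the promise (moved into the lambda) and
            // fulfills it with the fire time, or with an error.
            auto cb = [promise = std::move(promise)](bool ok) mutable {
                if (ok) {
                    promise.set_value(std::chrono::steady_clock::now());
                } else {
                    promise.set_exception(
                        std::make_exception_ptr(std::runtime_error("alarm failed")));
                }
            };
            return std::make_pair(std::move(cb), std::move(future));
        };

        auto [cb, future] = makeTimerFuture();
        cb(true);      // the network interface would invoke this when the alarm fires
        future.get();  // the test blocks here until the callback has run
        std::cout << "alarm fired\n";
    }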
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index da9aa915b54..86e1144b81e 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -487,12 +487,14 @@ void NetworkInterfaceMock::_enqueueOperation_inlock(
ResponseStatus rs(
ErrorCodes::NetworkInterfaceExceededTimeLimit, "Network timeout", Milliseconds(0));
std::vector<NetworkOperationList*> queuesToCheck{&_unscheduled, &_blackHoled, &_scheduled};
- _alarms.emplace(cbh, _now_inlock() + timeout, [
- this,
- cbh = std::move(cbh),
- queuesToCheck = std::move(queuesToCheck),
- rs = std::move(rs)
- ](Status) { _interruptWithResponse_inlock(cbh, queuesToCheck, rs); });
+ _alarms.emplace(cbh,
+ _now_inlock() + timeout,
+ [this,
+ cbh = std::move(cbh),
+ queuesToCheck = std::move(queuesToCheck),
+ rs = std::move(rs)](Status) {
+ _interruptWithResponse_inlock(cbh, queuesToCheck, rs);
+ });
}
}
@@ -535,25 +537,25 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
auto cbh = op.getCallbackHandle();
// The completion handler for the postconnect command schedules the original command.
auto postconnectCompletionHandler =
- [ this, op = std::move(op) ](TaskExecutor::ResponseOnAnyStatus rs) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (!rs.isOK()) {
- op.setResponse(_now_inlock(), rs);
- op.finishResponse();
- return;
- }
-
- auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
-
- if (!handleStatus.isOK()) {
- op.setResponse(_now_inlock(), handleStatus);
- op.finishResponse();
- return;
- }
-
- _connections.emplace(op.getRequest().target);
- _enqueueOperation_inlock(std::move(op));
- };
+ [this, op = std::move(op)](TaskExecutor::ResponseOnAnyStatus rs) mutable {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (!rs.isOK()) {
+ op.setResponse(_now_inlock(), rs);
+ op.finishResponse();
+ return;
+ }
+
+ auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
+
+ if (!handleStatus.isOK()) {
+ op.setResponse(_now_inlock(), handleStatus);
+ op.finishResponse();
+ return;
+ }
+
+ _connections.emplace(op.getRequest().target);
+ _enqueueOperation_inlock(std::move(op));
+ };
auto postconnectOp = NetworkOperation(cbh,
std::move(*hookPostconnectCommand),
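The postconnect completion handler above move-captures the NetworkOperation and later hands it onward. Distilled to its essentials, the mutable init-capture pattern looks like this (Operation and the queue are stand-ins for the mock's types):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct Operation {
        std::string request;
    };

    int main() {
        std::vector<Operation> queue;
        Operation op{"isMaster"};

        // op is moved into the closure; the closure must be mutable so the
        // captured copy can itself be moved out when the handler runs.
        auto handler = [&queue, op = std::move(op)](bool ok) mutable {
            if (!ok) {
                std::cout << "dropping " << op.request << '\n';
                return;
            }
            queue.push_back(std::move(op));  // enqueue the original command
        };

        handler(true);
        std::cout << "queued: " << queue.front().request << '\n';
    }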
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index c7528397951..cbd3484bf1c 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -239,21 +239,21 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
auto executor = baton ? ExecutorPtr(baton) : ExecutorPtr(_reactor);
std::move(cmdPF.future)
.thenRunOn(executor)
- .onError([requestId = cmdState->requestOnAny.id](auto error)
- ->StatusWith<RemoteCommandOnAnyResponse> {
- LOG(2) << "Failed to get connection from pool for request " << requestId
- << ": " << redact(error);
-
- // The TransportLayer has, for historical reasons returned SocketException
- // for network errors, but sharding assumes HostUnreachable on network
- // errors.
- if (error == ErrorCodes::SocketException) {
- error = Status(ErrorCodes::HostUnreachable, error.reason());
- }
- return error;
- })
- .getAsync([ this, cmdState, onFinish = std::move(onFinish) ](
- StatusWith<RemoteCommandOnAnyResponse> response) {
+ .onError([requestId = cmdState->requestOnAny.id](
+ auto error) -> StatusWith<RemoteCommandOnAnyResponse> {
+ LOG(2) << "Failed to get connection from pool for request " << requestId << ": "
+ << redact(error);
+
+ // The TransportLayer has, for historical reasons returned SocketException
+ // for network errors, but sharding assumes HostUnreachable on network
+ // errors.
+ if (error == ErrorCodes::SocketException) {
+ error = Status(ErrorCodes::HostUnreachable, error.reason());
+ }
+ return error;
+ })
+ .getAsync([this, cmdState, onFinish = std::move(onFinish)](
+ StatusWith<RemoteCommandOnAnyResponse> response) {
auto duration = now() - cmdState->start;
if (!response.isOK()) {
onFinish(RemoteCommandOnAnyResponse(boost::none, response.getStatus(), duration));
@@ -270,7 +270,7 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
return Status::OK();
}
- auto[connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
+ auto [connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
std::move(connFuture).thenRunOn(executor).getAsync([this, cmdState, baton](auto swConn) {
auto status = swConn.getStatus();
@@ -360,8 +360,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
uasserted(ErrorCodes::NetworkInterfaceExceededTimeLimit,
str::stream() << "Remote command timed out while waiting to get a "
"connection from the pool, took "
- << connDuration
- << ", timeout was set to "
+ << connDuration << ", timeout was set to "
<< state->requestOnAny.timeout);
}
@@ -509,14 +508,14 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
// If a user has already scheduled an alarm with a handle, make sure they intentionally
// override it by canceling and setting a new one.
auto alarmPair = std::make_pair(cbHandle, std::shared_ptr<AlarmState>(alarmState));
- auto && [ _, wasInserted ] = _inProgressAlarms.insert(std::move(alarmPair));
+ auto&& [_, wasInserted] = _inProgressAlarms.insert(std::move(alarmPair));
invariant(wasInserted);
}
- alarmState->timer->waitUntil(alarmState->when, nullptr).getAsync([
- this,
- state = std::move(alarmState)
- ](Status status) mutable { _answerAlarm(status, state); });
+ alarmState->timer->waitUntil(alarmState->when, nullptr)
+ .getAsync([this, state = std::move(alarmState)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return Status::OK();
}
@@ -546,7 +545,7 @@ void NetworkInterfaceTL::_cancelAllAlarms() {
return std::exchange(_inProgressAlarms, {});
}();
- for (auto && [ cbHandle, state ] : alarms) {
+ for (auto&& [cbHandle, state] : alarms) {
state->timer->cancel();
state->promise.setError(Status(ErrorCodes::CallbackCanceled, "Alarm cancelled"));
}
@@ -566,10 +565,10 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
if (status.isOK() && currentTime < state->when) {
LOG(2) << "Alarm returned early. Expected at: " << state->when
<< ", fired at: " << currentTime;
- state->timer->waitUntil(state->when, nullptr).getAsync([
- this,
- state = std::move(state)
- ](Status status) mutable { _answerAlarm(status, state); });
+ state->timer->waitUntil(state->when, nullptr)
+ .getAsync([this, state = std::move(state)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return;
}
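Several hunks in this file reformat C++17 structured bindings such as `auto&& [_, wasInserted] = _inProgressAlarms.insert(...)`. The insert-and-check idiom in isolation:

    #include <cassert>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::map<int, std::string> inProgress;

        auto&& [first, wasInserted] = inProgress.insert({42, "alarm"});
        assert(wasInserted);  // a fresh handle must not collide

        // A second insert with the same key reports the collision instead
        // of overwriting, which is what the invariant above relies on.
        auto&& [dup, wasInsertedAgain] = inProgress.insert({42, "other"});
        std::cout << std::boolalpha << wasInsertedAgain << '\n';  // false
        std::cout << dup->second << '\n';                         // "alarm"
    }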
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index 0f718242163..6f2b4823139 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -68,7 +68,7 @@ public:
return _cbHandles;
}();
- for (auto & [ id, handle ] : handles) {
+ for (auto& [id, handle] : handles) {
// If we don't have a handle yet, it means there's a scheduling thread that's
// dropped the lock but hasn't yet stashed it (or failed to schedule it on the
// underlying executor).
@@ -223,7 +223,7 @@ private:
// State 2 - Indeterminate state. We don't know yet if the task will get scheduled.
auto swCbHandle = std::forward<ScheduleCall>(schedule)(
- [ id, work = std::forward<Work>(work), self = shared_from_this() ](const auto& cargs) {
+ [id, work = std::forward<Work>(work), self = shared_from_this()](const auto& cargs) {
using ArgsT = std::decay_t<decltype(cargs)>;
stdx::unique_lock<stdx::mutex> lk(self->_mutex);
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index a392f72b974..d36f5c9bac6 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -30,7 +30,6 @@
#pragma once
#include <functional>
-#include <functional>
#include <memory>
#include <string>
diff --git a/src/mongo/executor/task_executor_cursor_integration_test.cpp b/src/mongo/executor/task_executor_cursor_integration_test.cpp
index e65068990dc..55b75650083 100644
--- a/src/mongo/executor/task_executor_cursor_integration_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_integration_test.cpp
@@ -90,8 +90,7 @@ TEST_F(TaskExecutorCursorFixture, Basic) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 10),
+ << "batchSize" << 10),
opCtx.get());
TaskExecutorCursor tec(executor(), rcr, [] {
diff --git a/src/mongo/executor/task_executor_cursor_test.cpp b/src/mongo/executor/task_executor_cursor_test.cpp
index 7fc7af43c65..57719c44a2c 100644
--- a/src/mongo/executor/task_executor_cursor_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_test.cpp
@@ -95,14 +95,10 @@ public:
NetworkInterfaceMock::InNetworkGuard ing(getNet());
ASSERT(getNet()->hasReadyRequests());
- auto rcr = getNet()->scheduleSuccessfulResponse(BSON(
- "cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound" << BSONArray()
- << "cursorsAlive"
- << BSONArray()
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1));
+ auto rcr = getNet()->scheduleSuccessfulResponse(
+ BSON("cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound"
+ << BSONArray() << "cursorsAlive" << BSONArray() << "cursorsUnknown"
+ << BSONArray() << "ok" << 1));
getNet()->runReadyNetworkOperations();
return rcr.cmdObj.getOwned();
@@ -124,8 +120,7 @@ public:
TEST_F(TaskExecutorCursorFixture, SingleBatchWorks) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 2);
+ << "batchSize" << 2);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -150,8 +145,7 @@ TEST_F(TaskExecutorCursorFixture, FailureInFind) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr);
@@ -175,8 +169,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
{
@@ -189,8 +182,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)),
+ << "cursors" << BSON_ARRAY(1)),
scheduleSuccessfulKillCursorResponse(1));
}
@@ -202,8 +194,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr, [] {
@@ -230,8 +221,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
// We can pick up after that interruption though
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 3),
+ << "batchSize" << 3),
scheduleSuccessfulCursorResponse("nextBatch", 3, 5, 1));
ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 3);
@@ -257,8 +247,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 1);
+ << "batchSize" << 1);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -272,10 +261,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the first batch
ASSERT_BSONOBJ_EQ(BSON("find"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("firstBatch", 1, 1, 1));
ASSERT_EQUALS(tec->getNext(opCtx.get()).get()["x"].Int(), 1);
@@ -283,10 +269,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the getmore
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("nextBatch", 2, 2, 1));
tec.reset();
@@ -294,10 +277,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the killcursor
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)
- << "lsid"
- << lsid.toBSON()),
+ << "cursors" << BSON_ARRAY(1) << "lsid" << lsid.toBSON()),
scheduleSuccessfulKillCursorResponse(1));
ASSERT_FALSE(hasReadyRequests());
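The BSON() streams reflowed throughout these tests chain alternating keys and values through operator<<, which is why the old one-token-per-line format grew so tall. A toy builder showing the alternation (an illustrative stand-in, not the real BSONObjBuilder, which produces binary BSON):

    #include <iostream>
    #include <sstream>
    #include <string>

    class DocBuilder {
    public:
        template <typename T>
        DocBuilder& operator<<(const T& token) {
            if (_expectKey) {
                _out << (_first ? "{ " : ", ") << token << ": ";
                _first = false;
            } else {
                _out << token;
            }
            _expectKey = !_expectKey;  // keys and values strictly alternate
            return *this;
        }
        std::string str() const { return _out.str() + " }"; }

    private:
        std::ostringstream _out;
        bool _first = true;
        bool _expectKey = true;
    };

    int main() {
        DocBuilder b;
        b << "find" << "test" << "batchSize" << 2;
        std::cout << b.str() << '\n';  // { find: test, batchSize: 2 }
    }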
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 02cc4367bba..a80a96ff55e 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -149,10 +149,9 @@ auto makeSetStatusOnRemoteCommandCompletionClosure(const RemoteCommandRequest* e
return str::stream() << "Request(" << request.target.toString() << ", "
<< request.dbname << ", " << request.cmdObj << ')';
};
- *outStatus =
- Status(ErrorCodes::BadValue,
- str::stream() << "Actual request: " << desc(cbData.request) << "; expected: "
- << desc(*expectedRequest));
+ *outStatus = Status(ErrorCodes::BadValue,
+ str::stream() << "Actual request: " << desc(cbData.request)
+ << "; expected: " << desc(*expectedRequest));
return;
}
*outStatus = cbData.response.status;
diff --git a/src/mongo/executor/task_executor_test_common.h b/src/mongo/executor/task_executor_test_common.h
index 1cb87db0422..69e5a37a0e9 100644
--- a/src/mongo/executor/task_executor_test_common.h
+++ b/src/mongo/executor/task_executor_test_common.h
@@ -49,9 +49,10 @@ class TaskExecutor;
* presumably after the release of MSVC2015, the signature can be changed to take the unique_ptr
* by value.
*/
-void addTestsForExecutor(const std::string& suiteName,
- std::function<std::unique_ptr<TaskExecutor>(
- std::unique_ptr<NetworkInterfaceMock>)> makeExecutor);
+void addTestsForExecutor(
+ const std::string& suiteName,
+ std::function<std::unique_ptr<TaskExecutor>(std::unique_ptr<NetworkInterfaceMock>)>
+ makeExecutor);
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 7ef0669aea0..808b2a7350c 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -360,7 +360,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
lk.unlock();
auto status = _net->setAlarm(
- cbHandle.getValue(), when, [ this, cbHandle = cbHandle.getValue() ](Status status) {
+ cbHandle.getValue(), when, [this, cbHandle = cbHandle.getValue()](Status status) {
if (status == ErrorCodes::CallbackCanceled) {
return;
}
diff --git a/src/mongo/idl/config_option_test.cpp b/src/mongo/idl/config_option_test.cpp
index 9e94434da85..e3e8b8782df 100644
--- a/src/mongo/idl/config_option_test.cpp
+++ b/src/mongo/idl/config_option_test.cpp
@@ -501,8 +501,7 @@ TEST(RedactionBSON, Strings) {
<< "also not a password"
<< "test.config.opt16depr2"
<< "this password should also be censored"
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -514,8 +513,7 @@ TEST(RedactionBSON, Strings) {
<< "also not a password"
<< "test.config.opt16depr2"
<< "<password>"
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -535,8 +533,7 @@ TEST(RedactionBSON, Arrays) {
<< "test.config.opt16depr2"
<< BSON_ARRAY("first censored password"
<< "next censored password")
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -551,8 +548,7 @@ TEST(RedactionBSON, Arrays) {
<< "test.config.opt16depr2"
<< BSON_ARRAY("<password>"
<< "<password>")
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -571,8 +567,7 @@ TEST(RedactionBSON, SubObjects) {
<< "next censored password")
<< "opt16depr"
<< "should be censored too"))
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -586,8 +581,7 @@ TEST(RedactionBSON, SubObjects) {
<< "<password>")
<< "opt16depr"
<< "<password>"))
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -620,7 +614,9 @@ TEST(ConfigOptionNoInit, Opt1) {
ASSERT_OK(addIDLTestConfigs(&options));
const std::vector<std::string> argv({
- "mongod", "--testConfigNoInitOpt1", "Hello",
+ "mongod",
+ "--testConfigNoInitOpt1",
+ "Hello",
});
moe::Environment parsed;
ASSERT_OK(moe::OptionsParser().run(options, argv, {}, &parsed));
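RedactionBSON verifies that censorBSONObj() rewrites password-valued options to "<password>" while leaving other fields intact. A simplified sketch of such a redaction pass over a flat key/value map (assumption: the real code walks nested BSON and matches registered option names; this toy merely matches a name suffix):

    #include <iostream>
    #include <map>
    #include <string>

    void censor(std::map<std::string, std::string>& obj) {
        for (auto& [key, value] : obj) {
            // Sketch assumption: any option ending in "password" is
            // sensitive. The real matcher consults the option registry.
            if (key.size() >= 8 &&
                key.compare(key.size() - 8, 8, "password") == 0) {
                value = "<password>";
            }
        }
    }

    int main() {
        std::map<std::string, std::string> obj{
            {"firstarg", "not a password"},
            {"test.config.password", "hunter2"},
        };
        censor(obj);
        std::cout << obj["test.config.password"] << '\n';  // <password>
    }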
diff --git a/src/mongo/idl/idl_parser.cpp b/src/mongo/idl/idl_parser.cpp
index 0a45ac52709..135d49b784b 100644
--- a/src/mongo/idl/idl_parser.cpp
+++ b/src/mongo/idl/idl_parser.cpp
@@ -76,9 +76,7 @@ bool IDLParserErrorContext::checkAndAssertTypeSlowPath(const BSONElement& elemen
std::string path = getElementPath(element);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong type '"
- << typeName(elementType)
- << "', expected type '"
- << typeName(type)
+ << typeName(elementType) << "', expected type '" << typeName(type)
<< "'");
}
@@ -93,10 +91,8 @@ bool IDLParserErrorContext::checkAndAssertBinDataTypeSlowPath(const BSONElement&
std::string path = getElementPath(element);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong bindData type '"
- << typeName(element.binDataType())
- << "', expected type '"
- << typeName(type)
- << "'");
+ << typeName(element.binDataType()) << "', expected type '"
+ << typeName(type) << "'");
}
return true;
@@ -117,9 +113,7 @@ bool IDLParserErrorContext::checkAndAssertTypes(const BSONElement& element,
std::string type_str = toCommaDelimitedList(types);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong type '"
- << typeName(element.type())
- << "', expected types '["
- << type_str
+ << typeName(element.type()) << "', expected types '[" << type_str
<< "']");
}
@@ -204,10 +198,8 @@ void IDLParserErrorContext::throwBadArrayFieldNumberSequence(std::uint32_t actua
std::string path = getElementPath(StringData());
uasserted(40423,
str::stream() << "BSON array field '" << path << "' has a non-sequential value '"
- << actualValue
- << "' for an array field name, expected value '"
- << expectedValue
- << "'.");
+ << actualValue << "' for an array field name, expected value '"
+ << expectedValue << "'.");
}
void IDLParserErrorContext::throwBadEnumValue(int enumValue) const {
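Most of the churn in this commit is clang-format reflowing str::stream() chains like the ones above. The class behind them is small: an ostringstream wrapper that is implicitly convertible to std::string, so a whole error message can be built inline as one expression. A sketch of the idea (the real mongo::str::stream has more overloads):

    #include <iostream>
    #include <sstream>
    #include <string>

    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& v) {
            _ss << v;
            return *this;
        }
        operator std::string() const { return _ss.str(); }  // implicit on purpose

    private:
        std::ostringstream _ss;
    };

    int main() {
        std::string path = "root.field1";
        std::string msg = stream() << "BSON field '" << path << "' is the wrong type '"
                                   << "string" << "', expected type '" << "int" << "'";
        std::cout << msg << '\n';
    }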
diff --git a/src/mongo/idl/idl_parser.h b/src/mongo/idl/idl_parser.h
index 32a3f83b1af..70fdcd97f76 100644
--- a/src/mongo/idl/idl_parser.h
+++ b/src/mongo/idl/idl_parser.h
@@ -188,8 +188,8 @@ private:
bool checkAndAssertTypeSlowPath(const BSONElement& element, BSONType type) const;
/**
- * See comment on checkAndAssertBinDataType.
- */
+ * See comment on checkAndAssertBinDataType.
+ */
bool checkAndAssertBinDataTypeSlowPath(const BSONElement& element, BinDataType type) const;
private:
@@ -222,10 +222,7 @@ template <typename T>
void throwComparisonError(StringData fieldName, StringData op, T actualValue, T expectedValue) {
uasserted(51024,
str::stream() << "BSON field '" << fieldName << "' value must be " << op << " "
- << expectedValue
- << ", actual value '"
- << actualValue
- << "'");
+ << expectedValue << ", actual value '" << actualValue << "'");
}
diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp
index f08ec698351..48bfe499823 100644
--- a/src/mongo/idl/idl_test.cpp
+++ b/src/mongo/idl/idl_test.cpp
@@ -100,8 +100,8 @@ void assertOpMsgEquals(const OpMsgRequest& left, const OpMsgRequest& right) {
}
/**
-* Validate two OpMsgRequests are the same including their DocumentSequences.
-*/
+ * Validate two OpMsgRequests are the same including their DocumentSequences.
+ */
void assertOpMsgEqualsExact(const OpMsgRequest& left, const OpMsgRequest& right) {
ASSERT_BSONOBJ_EQ(left.body, right.body);
@@ -766,11 +766,8 @@ TEST(IDLFieldTests, TestOptionalFields) {
template <typename TestT>
void TestWeakType(TestT test_value) {
IDLParserErrorContext ctxt("root");
- auto testDoc =
- BSON("field1" << test_value << "field2" << test_value << "field3" << test_value << "field4"
- << test_value
- << "field5"
- << test_value);
+ auto testDoc = BSON("field1" << test_value << "field2" << test_value << "field3" << test_value
+ << "field4" << test_value << "field5" << test_value);
auto testStruct = Optional_field::parse(ctxt, testDoc);
ASSERT_FALSE(testStruct.getField1().is_initialized());
@@ -860,11 +857,8 @@ TEST(IDLArrayTests, TestSimpleArrays) {
auto testDoc = BSON("field1" << BSON_ARRAY("Foo"
<< "Bar"
<< "???")
- << "field2"
- << BSON_ARRAY(1 << 2 << 3)
- << "field3"
- << BSON_ARRAY(1.2 << 3.4 << 5.6)
- << "field4"
+ << "field2" << BSON_ARRAY(1 << 2 << 3) << "field3"
+ << BSON_ARRAY(1.2 << 3.4 << 5.6) << "field4"
<< BSON_ARRAY(BSONBinData(array1, 3, BinDataGeneral)
<< BSONBinData(array2, 3, BinDataGeneral))
<< "field5"
@@ -927,12 +921,10 @@ TEST(IDLArrayTests, TestSimpleOptionalArrays) {
auto testDoc = BSON("field1" << BSON_ARRAY("Foo"
<< "Bar"
<< "???")
- << "field2"
- << BSON_ARRAY(1 << 2 << 3)
- << "field3"
+ << "field2" << BSON_ARRAY(1 << 2 << 3) << "field3"
<< BSON_ARRAY(1.2 << 3.4 << 5.6)
- );
+ );
auto testStruct = Optional_array_fields::parse(ctxt, testDoc);
assert_same_types<decltype(testStruct.getField1()),
@@ -1062,35 +1054,27 @@ TEST(IDLArrayTests, TestArraysOfComplexTypes) {
IDLParserErrorContext ctxt("root");
// Positive: Test document
- auto testDoc = BSON("field1" << BSON_ARRAY(1 << 2 << 3) << "field2" << BSON_ARRAY("a.b"
- << "c.d")
- << "field3"
- << BSON_ARRAY(1 << "2")
- << "field4"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "field5"
- << BSON_ARRAY(BSONObj() << BSONObj() << BSONObj())
- << "field6"
+ auto testDoc = BSON("field1" << BSON_ARRAY(1 << 2 << 3) << "field2"
+ << BSON_ARRAY("a.b"
+ << "c.d")
+ << "field3" << BSON_ARRAY(1 << "2") << "field4"
+ << BSON_ARRAY(BSONObj() << BSONObj()) << "field5"
+ << BSON_ARRAY(BSONObj() << BSONObj() << BSONObj()) << "field6"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "field1o"
- << BSON_ARRAY(1 << 2 << 3)
- << "field2o"
+ << "field1o" << BSON_ARRAY(1 << 2 << 3) << "field2o"
<< BSON_ARRAY("a.b"
<< "c.d")
- << "field3o"
- << BSON_ARRAY(1 << "2")
- << "field4o"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "field6o"
+ << "field3o" << BSON_ARRAY(1 << "2") << "field4o"
+ << BSON_ARRAY(BSONObj() << BSONObj()) << "field6o"
<< BSON_ARRAY(BSON("value"
<< "goodbye")
<< BSON("value"
<< "world"))
- );
+ );
auto testStruct = Complex_array_fields::parse(ctxt, testDoc);
assert_same_types<decltype(testStruct.getField1()), const std::vector<std::int64_t>&>();
@@ -1406,8 +1390,7 @@ TEST(IDLChainedType, TestChainedType) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5);
+ << "field2" << 5);
auto testStruct = Chained_struct_only::parse(ctxt, testDoc);
@@ -1450,10 +1433,7 @@ TEST(IDLChainedType, TestExtraFields) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "field3"
- << 123456);
+ << "field2" << 5 << "field3" << 123456);
auto testStruct = Chained_struct_only::parse(ctxt, testDoc);
ASSERT_EQUALS(testStruct.getChainedType().getField1(), "abc");
@@ -1467,10 +1447,7 @@ TEST(IDLChainedType, TestDuplicateFields) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "field2"
- << 123456);
+ << "field2" << 5 << "field2" << 123456);
ASSERT_THROWS(Chained_struct_only::parse(ctxt, testDoc), AssertionException);
}
@@ -1480,8 +1457,9 @@ TEST(IDLChainedType, TestDuplicateFields) {
TEST(IDLChainedType, TestChainedStruct) {
IDLParserErrorContext ctxt("root");
- auto testDoc = BSON("anyField" << 123.456 << "objectField" << BSON("random"
- << "pair")
+ auto testDoc = BSON("anyField" << 123.456 << "objectField"
+ << BSON("random"
+ << "pair")
<< "field3"
<< "abc");
@@ -1511,13 +1489,10 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
- << "extraField"
- << 787);
+ << "extraField" << 787);
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
}
@@ -1526,13 +1501,10 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
- << "anyField"
- << 787);
+ << "anyField" << 787);
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
}
@@ -1542,9 +1514,7 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
<< "thing")
<< "field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair"));
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
@@ -1554,9 +1524,7 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
<< "field3"
@@ -1572,12 +1540,9 @@ TEST(IDLChainedType, TestChainedMixedStruct) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "stringField"
+ << "field2" << 5 << "stringField"
<< "def"
- << "field3"
- << 456);
+ << "field3" << 456);
auto testStruct = Chained_struct_type_mixed::parse(ctxt, testDoc);
@@ -1718,9 +1683,7 @@ TEST(IDLCommand, TestConcatentateWithDb) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
<< "db");
@@ -1742,11 +1705,10 @@ TEST(IDLCommand, TestConcatentateWithDb) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five");
+ auto testDocWithoutDb =
+ BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbCommand one_new(NamespaceString("db.coll1"));
@@ -1791,11 +1753,10 @@ TEST(IDLCommand, TestConcatentateWithDbNegative) {
// Negative - duplicate namespace field
{
- auto testDoc = BSON("BasicConcatenateWithDbCommand" << 1 << "field1" << 3
- << "BasicConcatenateWithDbCommand"
- << 1
- << "field2"
- << "five");
+ auto testDoc =
+ BSON("BasicConcatenateWithDbCommand" << 1 << "field1" << 3
+ << "BasicConcatenateWithDbCommand" << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicConcatenateWithDbCommand::parse(ctxt, makeOMR(testDoc)),
AssertionException);
}
@@ -1838,13 +1799,12 @@ TEST(IDLCommand, TestConcatentateWithDbNegative) {
TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) {
IDLParserErrorContext ctxt("root");
- auto testDoc = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five"
- << "$db"
- << "db");
+ auto testDoc =
+ BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five"
+ << "$db"
+ << "db");
auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(testStruct.getField1(), 3);
@@ -1863,11 +1823,10 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five");
+ auto testDocWithoutDb =
+ BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbOrUUIDCommand one_new(NamespaceString("db.coll1"));
@@ -1921,9 +1880,9 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(
- BasicConcatenateWithDbOrUUIDCommand::kCommandName << uuid << "field1" << 3 << "field2"
- << "five");
+ auto testDocWithoutDb = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName
+ << uuid << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbOrUUIDCommand one_new(NamespaceStringOrUUID("db", uuid));
@@ -1954,11 +1913,9 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUIDNegative) {
// Negative - duplicate namespace field
{
auto testDoc =
- BSON("BasicConcatenateWithDbOrUUIDCommand" << 1 << "field1" << 3
- << "BasicConcatenateWithDbOrUUIDCommand"
- << 1
- << "field2"
- << "five");
+ BSON("BasicConcatenateWithDbOrUUIDCommand"
+ << 1 << "field1" << 3 << "BasicConcatenateWithDbOrUUIDCommand" << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc)),
AssertionException);
}
@@ -2040,9 +1997,9 @@ TEST(IDLCommand, TestIgnoredNegative) {
// Negative - duplicate namespace field
{
- auto testDoc = BSON(
- "BasicIgnoredCommand" << 1 << "field1" << 3 << "BasicIgnoredCommand" << 1 << "field2"
- << "five");
+ auto testDoc = BSON("BasicIgnoredCommand" << 1 << "field1" << 3 << "BasicIgnoredCommand"
+ << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicIgnoredCommand::parse(ctxt, makeOMR(testDoc)), AssertionException);
}
@@ -2067,9 +2024,7 @@ TEST(IDLDocSequence, TestBasic) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
<< "db"
@@ -2078,8 +2033,7 @@ TEST(IDLDocSequence, TestBasic) {
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2145,15 +2099,12 @@ TEST(IDLDocSequence, TestMissingDB) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2167,9 +2118,7 @@ void TestDocSequence(StringData name) {
IDLParserErrorContext ctxt("root");
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2208,9 +2157,7 @@ void TestBadDocSequences(StringData name, bool extraFieldAllowed) {
IDLParserErrorContext ctxt("root");
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
// Negative: Duplicate fields in doc sequence
@@ -2279,17 +2226,14 @@ void TestDuplicateDocSequences(StringData name) {
// Negative: Duplicate fields in doc sequence and body
{
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs",
@@ -2304,17 +2248,14 @@ void TestDuplicateDocSequences(StringData name) {
// Negative: Duplicate fields in doc sequence and body
{
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"objects", {BSON("foo" << 1)}});
@@ -2337,17 +2278,14 @@ TEST(IDLDocSequence, TestEmptySequence) {
{
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs", {}});
@@ -2359,12 +2297,9 @@ TEST(IDLDocSequence, TestEmptySequence) {
{
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs", {}});
@@ -2396,19 +2331,14 @@ TEST(IDLDocSequence, TestWellKnownFieldsAreIgnored) {
for (auto knownField : knownFields) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
- << "five"
- << knownField
- << "extra"
+ << "field1" << 3 << "field2"
+ << "five" << knownField << "extra"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2448,21 +2378,16 @@ TEST(IDLDocSequence, TestWellKnownFieldsPassthrough) {
for (auto knownField : knownFields) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
- << "db"
- << knownField
- << "extra"
+ << "db" << knownField << "extra"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2482,9 +2407,7 @@ TEST(IDLDocSequence, TestNonStrict) {
{
auto testTempDoc = BSON("DocSequenceCommandNonStrict"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2504,12 +2427,9 @@ TEST(IDLDocSequence, TestNonStrict) {
{
auto testTempDoc = BSON("DocSequenceCommandNonStrict"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
- << "extra"
- << 1);
+ << "extra" << 1);
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs",
@@ -2531,19 +2451,14 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto testPassthrough = BSON("$db"
<< "foo"
- << "maxTimeMS"
- << 6
- << "$client"
+ << "maxTimeMS" << 6 << "$client"
<< "foo");
auto testDoc = BSON("KnownFieldCommand"
<< "coll1"
<< "$db"
<< "db"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42);
+ << "field1" << 28 << "maxTimeMS" << 42);
auto testStruct = KnownFieldCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(28, testStruct.getField1());
@@ -2553,11 +2468,7 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto expectedOpMsgDoc = BSON("KnownFieldCommand"
<< "coll1"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42
- << "$db"
+ << "field1" << 28 << "maxTimeMS" << 42 << "$db"
<< "db"
<< "$client"
@@ -2569,11 +2480,7 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto expectedBSONDoc = BSON("KnownFieldCommand"
<< "coll1"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42
- << "$db"
+ << "field1" << 28 << "maxTimeMS" << 42 << "$db"
<< "foo"
<< "$client"
@@ -2664,14 +2571,9 @@ TEST(IDLValidatedField, Int_basic_ranges) {
std::int32_t byte_range,
std::int32_t int_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int" << nonneg
- << "non_positive_int"
- << nonpos
- << "byte_range_int"
- << byte_range
- << "range_int"
- << int_range);
+ auto doc = BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int"
+ << nonneg << "non_positive_int" << nonpos << "byte_range_int"
+ << byte_range << "range_int" << int_range);
auto obj = Int_basic_ranges::parse(ctxt, doc);
ASSERT_EQUALS(obj.getPositive_int(), pos);
ASSERT_EQUALS(obj.getNegative_int(), neg);
@@ -2689,14 +2591,9 @@ TEST(IDLValidatedField, Int_basic_ranges) {
std::int32_t byte_range,
std::int32_t int_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int" << nonneg
- << "non_positive_int"
- << nonpos
- << "byte_range_int"
- << byte_range
- << "range_int"
- << int_range);
+ auto doc = BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int"
+ << nonneg << "non_positive_int" << nonpos << "byte_range_int"
+ << byte_range << "range_int" << int_range);
ASSERT_THROWS(Int_basic_ranges::parse(ctxt, doc), AssertionException);
};
@@ -2744,13 +2641,9 @@ TEST(IDLValidatedField, Double_basic_ranges) {
const auto tryPass =
[](double pos, double neg, double nonneg, double nonpos, double double_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_double" << pos << "negative_double" << neg << "non_negative_double"
- << nonneg
- << "non_positive_double"
- << nonpos
- << "range_double"
- << double_range);
+ auto doc = BSON("positive_double"
+ << pos << "negative_double" << neg << "non_negative_double" << nonneg
+ << "non_positive_double" << nonpos << "range_double" << double_range);
auto obj = Double_basic_ranges::parse(ctxt, doc);
ASSERT_EQUALS(obj.getPositive_double(), pos);
ASSERT_EQUALS(obj.getNegative_double(), neg);
@@ -2763,13 +2656,9 @@ TEST(IDLValidatedField, Double_basic_ranges) {
const auto tryFail =
[](double pos, double neg, double nonneg, double nonpos, double double_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_double" << pos << "negative_double" << neg << "non_negative_double"
- << nonneg
- << "non_positive_double"
- << nonpos
- << "range_double"
- << double_range);
+ auto doc = BSON("positive_double"
+ << pos << "negative_double" << neg << "non_negative_double" << nonneg
+ << "non_positive_double" << nonpos << "range_double" << double_range);
ASSERT_THROWS(Double_basic_ranges::parse(ctxt, doc), AssertionException);
};
@@ -2807,8 +2696,7 @@ TEST(IDLValidatedField, Callback_validators) {
[](std::int32_t int_even, double double_nearly_int, StringData string_starts_with_x) {
IDLParserErrorContext ctxt("root");
auto doc = BSON("int_even" << int_even << "double_nearly_int" << double_nearly_int
- << "string_starts_with_x"
- << string_starts_with_x);
+ << "string_starts_with_x" << string_starts_with_x);
auto obj = Callback_validators::parse(ctxt, doc);
ASSERT_EQUALS(obj.getInt_even(), int_even);
ASSERT_EQUALS(obj.getDouble_nearly_int(), double_nearly_int);
@@ -2820,8 +2708,7 @@ TEST(IDLValidatedField, Callback_validators) {
[](std::int32_t int_even, double double_nearly_int, StringData string_starts_with_x) {
IDLParserErrorContext ctxt("root");
auto doc = BSON("int_even" << int_even << "double_nearly_int" << double_nearly_int
- << "string_starts_with_x"
- << string_starts_with_x);
+ << "string_starts_with_x" << string_starts_with_x);
ASSERT_THROWS(Callback_validators::parse(ctxt, doc), AssertionException);
};
@@ -2844,9 +2731,7 @@ TEST(IDLTypeCommand, TestString) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(CommandTypeStringCommand::kCommandName << "foo"
- << "field1"
- << 3
- << "$db"
+ << "field1" << 3 << "$db"
<< "db");
auto testStruct = CommandTypeStringCommand::parse(ctxt, makeOMR(testDoc));
@@ -2866,8 +2751,7 @@ TEST(IDLTypeCommand, TestString) {
// Positive: Test we can serialize from nothing the same document except for $db
{
auto testDocWithoutDb = BSON(CommandTypeStringCommand::kCommandName << "foo"
- << "field1"
- << 3);
+ << "field1" << 3);
BSONObjBuilder builder;
CommandTypeStringCommand one_new("foo");
@@ -3007,9 +2891,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(WellNamedCommand::kCommandName << "foo"
- << "field1"
- << 3
- << "$db"
+ << "field1" << 3 << "$db"
<< "db");
auto testStruct = WellNamedCommand::parse(ctxt, makeOMR(testDoc));
@@ -3029,8 +2911,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) {
// Positive: Test we can serialize from nothing the same document except for $db
{
auto testDocWithoutDb = BSON(WellNamedCommand::kCommandName << "foo"
- << "field1"
- << 3);
+ << "field1" << 3);
BSONObjBuilder builder;
WellNamedCommand one_new("foo");
diff --git a/src/mongo/idl/server_parameter_specialized_test.cpp b/src/mongo/idl/server_parameter_specialized_test.cpp
index 7bf9cdb6bc6..52a7e8af8b0 100644
--- a/src/mongo/idl/server_parameter_specialized_test.cpp
+++ b/src/mongo/idl/server_parameter_specialized_test.cpp
@@ -242,24 +242,20 @@ TEST(SpecializedServerParameter, multiValue) {
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "start value"
- << "flag"
- << true));
+ << "flag" << true));
ASSERT_OK(edsp->setFromString("second value"));
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "second value"
- << "flag"
- << false));
+ << "flag" << false));
ASSERT_OK(edsp->set(BSON("" << BSON("value"
<< "third value"
- << "flag"
- << true))
+ << "flag" << true))
.firstElement()));
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "third value"
- << "flag"
- << true));
+ << "flag" << true));
}
// specializedWithCtorAndValue
diff --git a/src/mongo/idl/server_parameter_with_storage.h b/src/mongo/idl/server_parameter_with_storage.h
index 30d1d4abc7d..480983f4ad2 100644
--- a/src/mongo/idl/server_parameter_with_storage.h
+++ b/src/mongo/idl/server_parameter_with_storage.h
@@ -294,15 +294,12 @@ public:
*/
template <class predicate>
void addBound(const element_type& bound) {
- addValidator([ bound, spname = name() ](const element_type& value) {
+ addValidator([bound, spname = name()](const element_type& value) {
if (!predicate::evaluate(value, bound)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Invalid value for parameter " << spname << ": "
- << value
- << " is not "
- << predicate::description
- << " "
- << bound);
+ str::stream()
+ << "Invalid value for parameter " << spname << ": " << value
+ << " is not " << predicate::description << " " << bound);
}
return Status::OK();
});
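addBound<predicate>() registers a closure that checks each new value against a captured bound and reports failures through a Status. A self-contained sketch with stand-in Status and predicate types (names are illustrative, not the real server-parameter classes):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Status {
        bool ok;
        std::string reason;
    };

    struct LT {
        static constexpr const char* description = "less than";
        static bool evaluate(int value, int bound) { return value < bound; }
    };

    class Parameter {
    public:
        explicit Parameter(std::string name) : _name(std::move(name)) {}

        template <class predicate>
        void addBound(int bound) {
            // The closure captures the bound and the parameter name, so a
            // later set() can produce a precise error message.
            _validators.push_back([bound, spname = _name](int value) -> Status {
                if (!predicate::evaluate(value, bound)) {
                    return {false,
                            "Invalid value for parameter " + spname + ": " +
                                std::to_string(value) + " is not " +
                                predicate::description + " " + std::to_string(bound)};
                }
                return {true, {}};
            });
        }

        Status validate(int value) const {
            for (const auto& v : _validators)
                if (auto s = v(value); !s.ok)
                    return s;
            return {true, {}};
        }

    private:
        std::string _name;
        std::vector<std::function<Status(int)>> _validators;
    };

    int main() {
        Parameter p("maxBatchSize");
        p.addBound<LT>(100);
        std::cout << p.validate(250).reason << '\n';
    }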
diff --git a/src/mongo/logger/encoder.h b/src/mongo/logger/encoder.h
index f51642d62e7..29226dc2f5c 100644
--- a/src/mongo/logger/encoder.h
+++ b/src/mongo/logger/encoder.h
@@ -48,4 +48,4 @@ public:
};
} // namespace logger
-} // nnamspace mongo
+} // namespace mongo
diff --git a/src/mongo/logger/log_component.cpp b/src/mongo/logger/log_component.cpp
index a214fd757e2..ec389788d43 100644
--- a/src/mongo/logger/log_component.cpp
+++ b/src/mongo/logger/log_component.cpp
@@ -245,5 +245,5 @@ std::ostream& operator<<(std::ostream& os, LogComponent component) {
return os << component.getNameForLog();
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
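This and the neighboring logger/logv2 hunks are one mechanical change: every namespace-closing brace gains a canonical `// namespace <name>` comment, and stale or misspelled variants (`// logger`, `// nnamspace mongo`, or a bare `}`) are normalized. The target shape in miniature:

    namespace mongo {
    namespace logger {

    class Encoder {};

    }  // namespace logger
    }  // namespace mongo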
diff --git a/src/mongo/logger/log_component_settings.cpp b/src/mongo/logger/log_component_settings.cpp
index c43e5d7d79b..f1da736be2c 100644
--- a/src/mongo/logger/log_component_settings.cpp
+++ b/src/mongo/logger/log_component_settings.cpp
@@ -117,5 +117,5 @@ bool LogComponentSettings::shouldLog(LogComponent component, LogSeverity severit
return severity >= LogSeverity::cast(_minimumLoggedSeverity[component].loadRelaxed());
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
diff --git a/src/mongo/logger/log_manager.cpp b/src/mongo/logger/log_manager.cpp
index 6d349e29459..c167853353f 100644
--- a/src/mongo/logger/log_manager.cpp
+++ b/src/mongo/logger/log_manager.cpp
@@ -72,5 +72,5 @@ bool LogManager::isDefaultConsoleAppenderAttached() const {
return static_cast<bool>(_defaultAppender);
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
diff --git a/src/mongo/logger/log_severity.cpp b/src/mongo/logger/log_severity.cpp
index 90ba9967e88..349be573983 100644
--- a/src/mongo/logger/log_severity.cpp
+++ b/src/mongo/logger/log_severity.cpp
@@ -46,7 +46,11 @@ constexpr auto infoSeverityString = "info"_sd;
constexpr auto debugSeverityString = "debug"_sd;
constexpr StringData kDebugLevelStrings[LogSeverity::kMaxDebugLevel] = {
- "D1"_sd, "D2"_sd, "D3"_sd, "D4"_sd, "D5"_sd,
+ "D1"_sd,
+ "D2"_sd,
+ "D3"_sd,
+ "D4"_sd,
+ "D5"_sd,
};
} // namespace
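kDebugLevelStrings is split one element per line because its initializer ends with a trailing comma; under the updated format rules a trailing comma in a braced list forces the vertical layout (assumed here to be the operative clang-format behavior). Side by side:

    #include <array>
    #include <iostream>
    #include <string>

    // Trailing comma: the formatter keeps one element per line.
    const std::array<std::string, 5> kDebugLevels = {
        "D1",
        "D2",
        "D3",
        "D4",
        "D5",
    };

    // No trailing comma: elements may be packed onto a single line.
    const std::array<std::string, 3> kPacked = {"D1", "D2", "D3"};

    int main() {
        std::cout << kDebugLevels.size() + kPacked.size() << '\n';  // 8
    }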
diff --git a/src/mongo/logger/log_test.cpp b/src/mongo/logger/log_test.cpp
index 836e82bb8ea..9fb534a135f 100644
--- a/src/mongo/logger/log_test.cpp
+++ b/src/mongo/logger/log_test.cpp
@@ -382,8 +382,7 @@ void testEncodedLogLine(const MessageEventEphemeral& event, const std::string& e
std::string s = os.str();
if (s.find(expectedSubstring) == std::string::npos) {
FAIL(str::stream() << "encoded log line does not contain substring \"" << expectedSubstring
- << "\". log line: "
- << s);
+ << "\". log line: " << s);
}
}
diff --git a/src/mongo/logger/parse_log_component_settings.cpp b/src/mongo/logger/parse_log_component_settings.cpp
index 7a8ee40f7cc..1982587e130 100644
--- a/src/mongo/logger/parse_log_component_settings.cpp
+++ b/src/mongo/logger/parse_log_component_settings.cpp
@@ -81,10 +81,10 @@ StatusWith<std::vector<LogComponentSetting>> parseLogComponentSettings(const BSO
if (elem.fieldNameStringData() == "verbosity") {
if (!elem.isNumber()) {
return StatusWith<Result>(ErrorCodes::BadValue,
- str::stream() << "Expected "
- << parentComponent.getDottedName()
- << ".verbosity to be a number, but found "
- << typeName(elem.type()));
+ str::stream()
+ << "Expected " << parentComponent.getDottedName()
+ << ".verbosity to be a number, but found "
+ << typeName(elem.type()));
}
levelsToSet.push_back((LogComponentSetting(parentComponent, elem.numberInt())));
continue;
@@ -93,22 +93,20 @@ StatusWith<std::vector<LogComponentSetting>> parseLogComponentSettings(const BSO
const LogComponent curr = _getComponentForShortName(shortName);
if (curr == LogComponent::kNumLogComponents || curr.parent() != parentComponent) {
- return StatusWith<Result>(
- ErrorCodes::BadValue,
- str::stream() << "Invalid component name " << parentComponent.getDottedName() << "."
- << shortName);
+ return StatusWith<Result>(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid component name "
+ << parentComponent.getDottedName() << "." << shortName);
}
if (elem.isNumber()) {
levelsToSet.push_back(LogComponentSetting(curr, elem.numberInt()));
continue;
}
if (elem.type() != Object) {
- return StatusWith<Result>(ErrorCodes::BadValue,
- str::stream() << "Invalid type " << typeName(elem.type())
- << "for component "
- << parentComponent.getDottedName()
- << "."
- << shortName);
+ return StatusWith<Result>(
+ ErrorCodes::BadValue,
+ str::stream() << "Invalid type " << typeName(elem.type()) << "for component "
+ << parentComponent.getDottedName() << "." << shortName);
}
iterators.push_back(iter);
parentComponent = curr;
diff --git a/src/mongo/logger/parse_log_component_settings_test.cpp b/src/mongo/logger/parse_log_component_settings_test.cpp
index 5d91f7b0f78..2271a16dbdd 100644
--- a/src/mongo/logger/parse_log_component_settings_test.cpp
+++ b/src/mongo/logger/parse_log_component_settings_test.cpp
@@ -140,10 +140,7 @@ TEST(Multi, FailBadComponent) {
BSONObj input =
BSON("verbosity" << 6 << "accessControl" << BSON("verbosity" << 5) << "storage"
<< BSON("verbosity" << 4 << "journal" << BSON("verbosity" << 6))
- << "No Such Component"
- << BSON("verbosity" << 2)
- << "extrafield"
- << 123);
+ << "No Such Component" << BSON("verbosity" << 2) << "extrafield" << 123);
StatusWith<Settings> result = parseLogComponentSettings(input);
@@ -175,4 +172,4 @@ TEST(DeeplyNested, FailLast) {
ASSERT_EQUALS(result.getStatus().reason(),
"Invalid component name storage.journal.No Such Component");
}
-}
+} // namespace
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index a2bf4bffd15..67e1fe4b89b 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -221,4 +221,4 @@ MONGO_INITIALIZER(RamLogCatalog)(InitializerContext*) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index 15ad0d8526e..306dc36bff4 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -183,4 +183,4 @@ public:
private:
RamLog* _ramlog;
};
-}
+} // namespace mongo
diff --git a/src/mongo/logger/rotatable_file_writer.cpp b/src/mongo/logger/rotatable_file_writer.cpp
index a8bd27a0fc4..0c5ed612b99 100644
--- a/src/mongo/logger/rotatable_file_writer.cpp
+++ b/src/mongo/logger/rotatable_file_writer.cpp
@@ -70,7 +70,7 @@ std::wstring utf8ToWide(StringData utf8Str) {
utf8Str.size(), // Count
tempBuffer.get(), // UTF-16 output buffer
utf8Str.size() // Buffer size in wide characters
- );
+ );
// TODO(schwerin): fassert finalSize > 0?
return std::wstring(tempBuffer.get(), finalSize);
}
@@ -142,7 +142,7 @@ bool Win32FileStreambuf::open(StringData fileName, bool append) {
OPEN_ALWAYS, // dwCreationDisposition
FILE_ATTRIBUTE_NORMAL, // dwFlagsAndAttributes
nullptr // hTemplateFile
- );
+ );
if (INVALID_HANDLE_VALUE == _fileHandle)
@@ -212,9 +212,9 @@ Status RotatableFileWriter::Use::rotate(bool renameOnRotate, const std::string&
try {
if (boost::filesystem::exists(renameTarget)) {
return Status(ErrorCodes::FileRenameFailed,
- str::stream() << "Renaming file " << _writer->_fileName << " to "
- << renameTarget
- << " failed; destination already exists");
+ str::stream()
+ << "Renaming file " << _writer->_fileName << " to "
+ << renameTarget << " failed; destination already exists");
}
} catch (const std::exception& e) {
return Status(ErrorCodes::FileRenameFailed,
@@ -229,11 +229,9 @@ Status RotatableFileWriter::Use::rotate(bool renameOnRotate, const std::string&
boost::filesystem::rename(_writer->_fileName, renameTarget, ec);
if (ec) {
return Status(ErrorCodes::FileRenameFailed,
- str::stream() << "Failed to rename \"" << _writer->_fileName
- << "\" to \""
- << renameTarget
- << "\": "
- << ec.message());
+ str::stream()
+ << "Failed to rename \"" << _writer->_fileName << "\" to \""
+ << renameTarget << "\": " << ec.message());
// TODO(schwerin): Make errnoWithDescription() available in the logger library, and
// use it here.
}
diff --git a/src/mongo/logger/rotatable_file_writer_test.cpp b/src/mongo/logger/rotatable_file_writer_test.cpp
index 2254e96f0be..ee97a5bede2 100644
--- a/src/mongo/logger/rotatable_file_writer_test.cpp
+++ b/src/mongo/logger/rotatable_file_writer_test.cpp
@@ -143,4 +143,4 @@ TEST_F(RotatableFileWriterTest, RotationTest) {
}
}
-} // namespace mongo
+} // namespace
diff --git a/src/mongo/logv2/log_component.cpp b/src/mongo/logv2/log_component.cpp
index 90b9fe52069..ee90ae63e86 100644
--- a/src/mongo/logv2/log_component.cpp
+++ b/src/mongo/logv2/log_component.cpp
@@ -241,5 +241,5 @@ StringData LogComponent::getNameForLog() const {
MONGO_UNREACHABLE;
}
-} // logv2
-} // mongo
+} // namespace logv2
+} // namespace mongo
diff --git a/src/mongo/logv2/log_component_settings.cpp b/src/mongo/logv2/log_component_settings.cpp
index 603cfd6729f..0003dfcbe8d 100644
--- a/src/mongo/logv2/log_component_settings.cpp
+++ b/src/mongo/logv2/log_component_settings.cpp
@@ -117,5 +117,5 @@ bool LogComponentSettings::shouldLog(LogComponent component, LogSeverity severit
return severity >= LogSeverity::cast(_minimumLoggedSeverity[component].loadRelaxed());
}
-} // logv2
-} // mongo
+} // namespace logv2
+} // namespace mongo
diff --git a/src/mongo/logv2/log_domain_global.cpp b/src/mongo/logv2/log_domain_global.cpp
index 2bcb5e224cf..16e00e8f4cc 100644
--- a/src/mongo/logv2/log_domain_global.cpp
+++ b/src/mongo/logv2/log_domain_global.cpp
@@ -42,5 +42,5 @@ LogSource& LogDomainGlobal::source() {
boost::shared_ptr<boost::log::core> LogDomainGlobal::core() {
return boost::log::core::get();
}
-}
-}
+} // namespace logv2
+} // namespace mongo
diff --git a/src/mongo/logv2/log_manager.cpp b/src/mongo/logv2/log_manager.cpp
index 89eb636bf3d..4cd8e7d8382 100644
--- a/src/mongo/logv2/log_manager.cpp
+++ b/src/mongo/logv2/log_manager.cpp
@@ -121,5 +121,5 @@ bool LogManager::isDefaultBackendsAttached() const {
return _impl->_defaultBackendsAttached;
}
-} // logv2
-} // mongo
+} // namespace logv2
+} // namespace mongo
diff --git a/src/mongo/logv2/log_manager.h b/src/mongo/logv2/log_manager.h
index d80fde255ae..059a3e7d019 100644
--- a/src/mongo/logv2/log_manager.h
+++ b/src/mongo/logv2/log_manager.h
@@ -58,10 +58,10 @@ public:
LogDomain& getGlobalDomain();
/**
- * Detaches the default log backends
- *
- * @note This function is not thread safe.
- */
+ * Detaches the default log backends
+ *
+ * @note This function is not thread safe.
+ */
void detachDefaultBackends();
/**
@@ -72,8 +72,8 @@ public:
void reattachDefaultBackends();
/**
- * Checks if the default log backends are attached
- */
+ * Checks if the default log backends are attached
+ */
bool isDefaultBackendsAttached() const;
private:
diff --git a/src/mongo/logv2/log_severity.cpp b/src/mongo/logv2/log_severity.cpp
index e0286b018f9..896509f5d39 100644
--- a/src/mongo/logv2/log_severity.cpp
+++ b/src/mongo/logv2/log_severity.cpp
@@ -46,7 +46,11 @@ constexpr auto infoSeverityString = "info"_sd;
constexpr auto debugSeverityString = "debug"_sd;
constexpr StringData kDebugLevelStrings[LogSeverity::kMaxDebugLevel] = {
- "D1"_sd, "D2"_sd, "D3"_sd, "D4"_sd, "D5"_sd,
+ "D1"_sd,
+ "D2"_sd,
+ "D3"_sd,
+ "D4"_sd,
+ "D5"_sd,
};
} // namespace
diff --git a/src/mongo/logv2/log_source.h b/src/mongo/logv2/log_source.h
index a833e36feb4..2a3b43653bf 100644
--- a/src/mongo/logv2/log_source.h
+++ b/src/mongo/logv2/log_source.h
@@ -46,16 +46,13 @@ namespace mongo {
namespace logv2 {
// Custom logging source that automatically add our set of attributes
-class LogSource
- : public boost::log::sources::basic_logger<char,
- LogSource,
- boost::log::sources::single_thread_model> {
+class LogSource : public boost::log::sources::
+ basic_logger<char, LogSource, boost::log::sources::single_thread_model> {
private:
private:
- typedef boost::log::sources::basic_logger<char,
- LogSource,
- boost::log::sources::single_thread_model>
- base_type;
+ typedef boost::log::sources::
+ basic_logger<char, LogSource, boost::log::sources::single_thread_model>
+ base_type;
public:
LogSource() : LogSource(boost::log::core::get()) {}
diff --git a/src/mongo/logv2/logv2_bm.cpp b/src/mongo/logv2/logv2_bm.cpp
index 0323d7a0100..73958db80f8 100644
--- a/src/mongo/logv2/logv2_bm.cpp
+++ b/src/mongo/logv2/logv2_bm.cpp
@@ -30,11 +30,11 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-#include "mongo/logv2/log.h"
#include "mongo/logger/console_appender.h"
#include "mongo/logger/logger.h"
#include "mongo/logger/message_event_utf8_encoder.h"
#include "mongo/logv2/component_settings_filter.h"
+#include "mongo/logv2/log.h"
#include "mongo/logv2/log_domain_impl.h"
#include "mongo/logv2/text_formatter.h"
#include "mongo/platform/basic.h"
diff --git a/src/mongo/platform/atomic_proxy.h b/src/mongo/platform/atomic_proxy.h
index 70c367421ec..b65cb5fb232 100644
--- a/src/mongo/platform/atomic_proxy.h
+++ b/src/mongo/platform/atomic_proxy.h
@@ -40,9 +40,9 @@
namespace mongo {
/**
-* Provides a simple version of an atomic version of T
-* that uses std::atomic<BaseWordT> as a backing type;
-*/
+ * Provides a simple version of an atomic version of T
+ * that uses std::atomic<BaseWordT> as a backing type;
+ */
template <typename T, typename BaseWordT>
class AtomicProxy {
MONGO_STATIC_ASSERT_MSG(sizeof(T) == sizeof(BaseWordT),
@@ -87,4 +87,4 @@ private:
};
using AtomicDouble = AtomicProxy<double, std::uint64_t>;
-}
+} // namespace mongo
diff --git a/src/mongo/platform/bits.h b/src/mongo/platform/bits.h
index 721e7be9c92..b12bda75b3e 100644
--- a/src/mongo/platform/bits.h
+++ b/src/mongo/platform/bits.h
@@ -93,4 +93,4 @@ int countTrailingZeros64(unsigned long long num) {
#else
#error "No bit-ops definitions for your platform"
#endif
-}
+} // namespace mongo
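
This bare-brace-to-commented-brace fix repeats across dozens of files below; the convention makes the closing line self-describing and lets the formatter verify it. In isolation:

namespace mongo {

// ... declarations such as countTrailingZeros64 live here ...
int countTrailingZeros64Example(unsigned long long num);

}  // namespace mongo  (the comment the formatter now maintains)
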
diff --git a/src/mongo/platform/bits_test.cpp b/src/mongo/platform/bits_test.cpp
index bb8f014e051..f6234660682 100644
--- a/src/mongo/platform/bits_test.cpp
+++ b/src/mongo/platform/bits_test.cpp
@@ -54,4 +54,4 @@ TEST(BitsTest_CountZeros, EachBit) {
ASSERT_EQUALS(countTrailingZeros64(x), i);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/condition_variable_test.cpp b/src/mongo/platform/condition_variable_test.cpp
index cd12e037537..88e7a40c617 100644
--- a/src/mongo/platform/condition_variable_test.cpp
+++ b/src/mongo/platform/condition_variable_test.cpp
@@ -58,4 +58,4 @@ TEST(ConditionVariable, BasicSingleThread) {
cv.notify_one();
worker.join();
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/decimal128_test.cpp b/src/mongo/platform/decimal128_test.cpp
index c865969b44c..835790ad5d7 100644
--- a/src/mongo/platform/decimal128_test.cpp
+++ b/src/mongo/platform/decimal128_test.cpp
@@ -1416,9 +1416,9 @@ TEST(Decimal128Test, TestDecimal128GetLargestNegativeExponentZero) {
}
/**
-* Test data was generated using 64 bit versions of these functions, so we must test
-* approximate results.
-*/
 + * Test data was generated using 64-bit versions of these functions, so we must test
+ * approximate results.
+ */
void assertDecimal128ApproxEqual(Decimal128 x, Decimal128 y) {
ASSERT_TRUE(x.subtract(y).toAbs().isLess(Decimal128("0.00000005")));
diff --git a/src/mongo/platform/mutex_test.cpp b/src/mongo/platform/mutex_test.cpp
index 6c6674aa197..d0230d40fa5 100644
--- a/src/mongo/platform/mutex_test.cpp
+++ b/src/mongo/platform/mutex_test.cpp
@@ -40,4 +40,4 @@ TEST(MongoMutexTest, BasicSingleThread) {
ASSERT(m.try_lock());
m.unlock();
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/random_test.cpp b/src/mongo/platform/random_test.cpp
index f2d5353887e..ee82a89490f 100644
--- a/src/mongo/platform/random_test.cpp
+++ b/src/mongo/platform/random_test.cpp
@@ -219,4 +219,4 @@ TEST(RandomTest, Secure1) {
ASSERT_NOT_EQUALS(a->nextInt64(), b->nextInt64());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/shared_library_posix.cpp b/src/mongo/platform/shared_library_posix.cpp
index a383efab94e..871bd697012 100644
--- a/src/mongo/platform/shared_library_posix.cpp
+++ b/src/mongo/platform/shared_library_posix.cpp
@@ -79,8 +79,7 @@ StatusWith<void*> SharedLibrary::getSymbol(StringData name) {
if (error_msg != nullptr) {
return StatusWith<void*>(ErrorCodes::InternalError,
str::stream() << "dlsym failed for symbol " << name
- << " with error message: "
- << error_msg);
+ << " with error message: " << error_msg);
}
return StatusWith<void*>(symbol);
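
For context on the getSymbol hunk: the dlsym protocol requires clearing dlerror() before the lookup and checking it afterward, because a null symbol address can be a legitimate result. A standalone POSIX sketch, with a plain std::string error-out in place of MongoDB's StatusWith (link with -ldl on older glibc):

#include <dlfcn.h>
#include <iostream>
#include <string>

// Returns the symbol address, or nullptr with *error describing the failure.
void* getSymbolSketch(void* handle, const std::string& name, std::string* error) {
    dlerror();  // clear any stale error state first
    void* symbol = dlsym(handle, name.c_str());
    if (const char* msg = dlerror(); msg != nullptr) {
        *error = "dlsym failed for symbol " + name + " with error message: " + msg;
        return nullptr;
    }
    return symbol;
}

int main() {
    void* self = dlopen(nullptr, RTLD_NOW);  // handle for the running program
    std::string error;
    if (void* sym = getSymbolSketch(self, "printf", &error))
        std::cout << "found printf at " << sym << '\n';
    else
        std::cout << error << '\n';
}
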
diff --git a/src/mongo/platform/strcasestr.h b/src/mongo/platform/strcasestr.h
index 1530520f1a2..6f9b42cb5a0 100644
--- a/src/mongo/platform/strcasestr.h
+++ b/src/mongo/platform/strcasestr.h
@@ -36,7 +36,7 @@ namespace pal {
const char* strcasestr(const char* haystack, const char* needle);
}
using mongo::pal::strcasestr;
-}
+} // namespace mongo
#else
diff --git a/src/mongo/rpc/get_status_from_command_result.cpp b/src/mongo/rpc/get_status_from_command_result.cpp
index b63ae786832..2607ff15e3c 100644
--- a/src/mongo/rpc/get_status_from_command_result.cpp
+++ b/src/mongo/rpc/get_status_from_command_result.cpp
@@ -98,14 +98,14 @@ Status getWriteConcernStatusFromCommandResult(const BSONObj& obj) {
std::string wcErrorParseMsg;
if (!wcError.parseBSON(wcErrObj, &wcErrorParseMsg)) {
return Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Failed to parse write concern section due to "
- << wcErrorParseMsg);
+ str::stream()
+ << "Failed to parse write concern section due to " << wcErrorParseMsg);
}
std::string wcErrorInvalidMsg;
if (!wcError.isValid(&wcErrorInvalidMsg)) {
return Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Failed to parse write concern section due to "
- << wcErrorInvalidMsg);
+ str::stream()
+ << "Failed to parse write concern section due to " << wcErrorInvalidMsg);
}
return wcError.toStatus();
}
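
Most rpc/ hunks in this commit repack chained << calls, sometimes moving the str::stream() call itself onto the next line. The pattern works because the builder returns itself from operator<< and converts implicitly to std::string; a minimal sketch of such a builder (not MongoDB's actual str::stream, whose internals are not reproduced here):

#include <iostream>
#include <sstream>
#include <string>

class StreamSketch {
public:
    template <typename T>
    StreamSketch& operator<<(const T& value) {
        _ss << value;  // returning *this is what lets the calls chain
        return *this;
    }

    operator std::string() const {
        return _ss.str();  // implicit conversion feeds Status-like constructors
    }

private:
    std::ostringstream _ss;
};

int main() {
    std::string msg = StreamSketch()
        << "Failed to parse write concern section due to " << "a bad field";
    std::cout << msg << '\n';
}
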
diff --git a/src/mongo/rpc/legacy_reply.cpp b/src/mongo/rpc/legacy_reply.cpp
index 75c69c16d9f..affdadbd38c 100644
--- a/src/mongo/rpc/legacy_reply.cpp
+++ b/src/mongo/rpc/legacy_reply.cpp
@@ -54,20 +54,17 @@ LegacyReply::LegacyReply(const Message* message) {
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad cursorId field,"
- << " expected a value of 0 but got "
- << qr.getCursorId(),
+ << " expected a value of 0 but got " << qr.getCursorId(),
qr.getCursorId() == 0);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad nReturned field,"
- << " expected a value of 1 but got "
- << qr.getNReturned(),
+ << " expected a value of 1 but got " << qr.getNReturned(),
qr.getNReturned() == 1);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad startingFrom field,"
- << " expected a value of 0 but got "
- << qr.getStartingFrom(),
+ << " expected a value of 0 but got " << qr.getStartingFrom(),
qr.getStartingFrom() == 0);
auto status = Validator<BSONObj>::validateLoad(qr.data(), qr.dataLen());
diff --git a/src/mongo/rpc/legacy_request.cpp b/src/mongo/rpc/legacy_request.cpp
index 426eba475fc..2c05714d4f4 100644
--- a/src/mongo/rpc/legacy_request.cpp
+++ b/src/mongo/rpc/legacy_request.cpp
@@ -48,9 +48,7 @@ OpMsgRequest opMsgRequestFromLegacyRequest(const Message& message) {
if (qm.queryOptions & QueryOption_Exhaust) {
uasserted(18527,
str::stream() << "The 'exhaust' OP_QUERY flag is invalid for commands: "
- << ns.ns()
- << " "
- << qm.query.toString());
+ << ns.ns() << " " << qm.query.toString());
}
uassert(40473,
diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp
index c217db2d9f4..e3ed093a693 100644
--- a/src/mongo/rpc/metadata.cpp
+++ b/src/mongo/rpc/metadata.cpp
@@ -148,7 +148,7 @@ bool isArrayOfObjects(BSONElement array) {
return true;
}
-}
+} // namespace
OpMsgRequest upconvertRequest(StringData db, BSONObj cmdObj, int queryFlags) {
cmdObj = cmdObj.getOwned(); // Usually this is a no-op since it is already owned.
diff --git a/src/mongo/rpc/metadata/client_metadata.cpp b/src/mongo/rpc/metadata/client_metadata.cpp
index d2199c13016..9b51a4bc750 100644
--- a/src/mongo/rpc/metadata/client_metadata.cpp
+++ b/src/mongo/rpc/metadata/client_metadata.cpp
@@ -99,8 +99,7 @@ Status ClientMetadata::parseClientMetadataDocument(const BSONObj& doc) {
if (static_cast<uint32_t>(doc.objsize()) > maxLength) {
return Status(ErrorCodes::ClientMetadataDocumentTooLarge,
str::stream() << "The client metadata document must be less then or equal to "
- << maxLength
- << "bytes");
+ << maxLength << "bytes");
}
// Get a copy so that we can take a stable reference to the app name inside
@@ -135,9 +134,10 @@ Status ClientMetadata::parseClientMetadataDocument(const BSONObj& doc) {
} else if (name == kDriver) {
if (!e.isABSONObj()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "' field is required to be a "
- "BSON document in the client "
- "metadata document");
+ str::stream() << "The '" << kDriver
+ << "' field is required to be a "
+ "BSON document in the client "
+ "metadata document");
}
Status s = validateDriverDocument(e.Obj());
@@ -196,10 +196,10 @@ StatusWith<StringData> ClientMetadata::parseApplicationDocument(const BSONObj& d
if (name == kName) {
if (e.type() != String) {
- return {
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kApplication << "." << kName
- << "' field must be a string in the client metadata document"};
+ return {ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kApplication << "." << kName
+ << "' field must be a string in the client metadata document"};
}
StringData value = e.checkAndGetStringData();
@@ -230,18 +230,18 @@ Status ClientMetadata::validateDriverDocument(const BSONObj& doc) {
if (name == kName) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "." << kName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kDriver << "." << kName
<< "' field must be a string in the client metadata document");
}
foundName = true;
} else if (name == kVersion) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "." << kVersion
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kDriver << "." << kVersion
<< "' field must be a string in the client metadata document");
}
@@ -274,9 +274,9 @@ Status ClientMetadata::validateOperatingSystemDocument(const BSONObj& doc) {
if (name == kType) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kOperatingSystem << "." << kType
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kOperatingSystem << "." << kType
<< "' field must be a string in the client metadata document");
}
@@ -287,8 +287,7 @@ Status ClientMetadata::validateOperatingSystemDocument(const BSONObj& doc) {
if (foundType == false) {
return Status(ErrorCodes::ClientMetadataMissingField,
str::stream() << "Missing required field '" << kOperatingSystem << "."
- << kType
- << "' in the client metadata document");
+ << kType << "' in the client metadata document");
}
return Status::OK();
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index e70355d37ca..be9b666d222 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -86,13 +86,11 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
ASSERT_EQUALS("g", swParseStatus.getValue().get().getApplicationName());
BSONObj outDoc =
- BSON(kMetadataDoc << BSON(
- kApplication << BSON(kName << "g") << kDriver
- << BSON(kName << "a" << kVersion << "b")
- << kOperatingSystem
- << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
- << kVersion
- << "f")));
+ BSON(kMetadataDoc << BSON(kApplication
+ << BSON(kName << "g") << kDriver
+ << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
+ << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
+ << kVersion << "f")));
ASSERT_BSONOBJ_EQ(obj, outDoc);
}
@@ -105,11 +103,11 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
auto swParseStatus = ClientMetadata::parse(obj[kMetadataDoc]);
ASSERT_OK(swParseStatus.getStatus());
- BSONObj outDoc = BSON(
- kMetadataDoc << BSON(
- kDriver << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
- << BSON(kType << "c" << kName << "d" << kArchitecture << "e" << kVersion
- << "f")));
+ BSONObj outDoc =
+ BSON(kMetadataDoc << BSON(kDriver
+ << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
+ << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
+ << kVersion << "f")));
ASSERT_BSONOBJ_EQ(obj, outDoc);
}
@@ -150,8 +148,7 @@ TEST(ClientMetadatTest, TestRequiredOnlyFields) {
// With AppName
ASSERT_DOC_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
}
@@ -160,24 +157,20 @@ TEST(ClientMetadatTest, TestRequiredOnlyFields) {
TEST(ClientMetadatTest, TestWithAppNameSpelledWrong) {
ASSERT_DOC_OK(kApplication << BSON("extra"
<< "1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Positive: test with empty application document
TEST(ClientMetadatTest, TestWithEmptyApplication) {
ASSERT_DOC_OK(kApplication << BSONObj() << kDriver << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Negative: test with application of the wrong type
TEST(ClientMetadatTest, TestNegativeWithAppNameWrongType) {
ASSERT_DOC_NOT_OK(kApplication << "1" << kDriver << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Positive: test with extra fields
@@ -185,10 +178,8 @@ TEST(ClientMetadatTest, TestExtraFields) {
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
@@ -196,24 +187,19 @@ TEST(ClientMetadatTest, TestExtraFields) {
<< BSON(kName << "n1" << kVersion << "v1"
<< "extra"
<< "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
<< kOperatingSystem
<< BSON(kType << kUnknown << "extra"
<< "v1"));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown) << "extra"
<< "v1");
}
@@ -236,20 +222,16 @@ TEST(ClientMetadatTest, TestNegativeMissingRequiredOneField) {
// Negative: document with wrong types for required fields
TEST(ClientMetadatTest, TestNegativeWrongTypes) {
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << 1) << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << 1 << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << 1 << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << 1)
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << 1) << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << 1));
}
@@ -262,20 +244,14 @@ TEST(ClientMetadatTest, TestNegativeLargeDocument) {
{
std::string str(350, 'x');
ASSERT_DOC_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
- << str);
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
+ << BSON(kType << kUnknown) << "extra" << str);
}
{
std::string str(512, 'x');
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
- << str);
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
+ << BSON(kType << kUnknown) << "extra" << str);
}
}
@@ -284,8 +260,7 @@ TEST(ClientMetadatTest, TestNegativeLargeAppName) {
{
std::string str(128, 'x');
ASSERT_DOC_OK(kApplication << BSON(kName << str) << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
<< BSON(kType << kUnknown));
BSONObjBuilder builder;
@@ -294,8 +269,7 @@ TEST(ClientMetadatTest, TestNegativeLargeAppName) {
{
std::string str(129, 'x');
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << str) << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
<< BSON(kType << kUnknown));
BSONObjBuilder builder;
@@ -327,8 +301,7 @@ TEST(ClientMetadatTest, TestMongoSAppend) {
<< kOperatingSystem
<< BSON(kType << "c" << kName << "d" << kArchitecture << "e" << kVersion
<< "f")
- << kMongos
- << BSON(kHost << "h" << kClient << "i" << kVersion << "j"));
+ << kMongos << BSON(kHost << "h" << kClient << "i" << kVersion << "j"));
ASSERT_BSONOBJ_EQ(doc, outDoc);
}
diff --git a/src/mongo/rpc/metadata/config_server_metadata.cpp b/src/mongo/rpc/metadata/config_server_metadata.cpp
index 0fb6859b28b..3dffe940087 100644
--- a/src/mongo/rpc/metadata/config_server_metadata.cpp
+++ b/src/mongo/rpc/metadata/config_server_metadata.cpp
@@ -64,9 +64,7 @@ StatusWith<ConfigServerMetadata> ConfigServerMetadata::readFromMetadata(
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "ConfigServerMetadata element has incorrect type: expected"
- << mongo::Object
- << " but got "
- << metadataElem.type()};
+ << mongo::Object << " but got " << metadataElem.type()};
}
BSONObj configMetadataObj = metadataElem.Obj();
diff --git a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
index ea9a0fbbdab..a3553a1db6c 100644
--- a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
@@ -191,6 +191,6 @@ TEST(LogicalTimeMetadataTest, UpconvertPass) {
converted.body);
}
+} // namespace
} // namespace rpc
} // namespace mongo
-}
diff --git a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
index c79dbeee5d9..9f07a7775ad 100644
--- a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
@@ -52,19 +52,12 @@ TEST(ReplResponseMetadataTest, OplogQueryMetadataRoundtrip) {
BSONObjBuilder builder;
metadata.writeToMetadata(&builder).transitional_ignore();
- BSONObj expectedObj(BSON(kOplogQueryMetadataFieldName << BSON(
- "lastOpCommitted"
- << BSON("ts" << opTime1.getTimestamp() << "t" << opTime1.getTerm())
- << "lastCommittedWall"
- << committedWall
- << "lastOpApplied"
- << BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
- << "rbid"
- << 6
- << "primaryIndex"
- << 12
- << "syncSourceIndex"
- << -1)));
+ BSONObj expectedObj(BSON(
+ kOplogQueryMetadataFieldName << BSON(
+ "lastOpCommitted" << BSON("ts" << opTime1.getTimestamp() << "t" << opTime1.getTerm())
+ << "lastCommittedWall" << committedWall << "lastOpApplied"
+ << BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
+ << "rbid" << 6 << "primaryIndex" << 12 << "syncSourceIndex" << -1)));
BSONObj serializedObj = builder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
index a2802b35416..5b3e746d8e7 100644
--- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
@@ -63,18 +63,10 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
BSON(kReplSetMetadataFieldName
<< BSON("term" << 3 << "lastOpCommitted"
<< BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm())
- << "lastCommittedWall"
- << committedWallTime
- << "lastOpVisible"
+ << "lastCommittedWall" << committedWallTime << "lastOpVisible"
<< BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
- << "configVersion"
- << 6
- << "replicaSetId"
- << metadata.getReplicaSetId()
- << "primaryIndex"
- << 12
- << "syncSourceIndex"
- << -1)));
+ << "configVersion" << 6 << "replicaSetId" << metadata.getReplicaSetId()
+ << "primaryIndex" << 12 << "syncSourceIndex" << -1)));
BSONObj serializedObj = builder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
diff --git a/src/mongo/rpc/metadata/sharding_metadata_test.cpp b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
index 92d1e5cb24b..dec0fb1c3d1 100644
--- a/src/mongo/rpc/metadata/sharding_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
@@ -54,8 +54,7 @@ TEST(ShardingMetadata, ReadFromMetadata) {
auto sm = checkParse(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << kElectionId)));
+ << "electionId" << kElectionId)));
ASSERT_EQ(sm.getLastElectionId(), kElectionId);
ASSERT_EQ(sm.getLastOpTime(), kLastOpTime);
}
@@ -89,8 +88,7 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << 3)),
+ << "electionId" << 3)),
ErrorCodes::TypeMismatch);
}
{
@@ -104,9 +102,7 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << kElectionId
- << "extra"
+ << "electionId" << kElectionId << "extra"
<< "this should not be here")),
ErrorCodes::InvalidOptions);
}
diff --git a/src/mongo/rpc/metadata/tracking_metadata.cpp b/src/mongo/rpc/metadata/tracking_metadata.cpp
index b284ceb8692..ba2fedb5d4d 100644
--- a/src/mongo/rpc/metadata/tracking_metadata.cpp
+++ b/src/mongo/rpc/metadata/tracking_metadata.cpp
@@ -99,9 +99,7 @@ StatusWith<TrackingMetadata> TrackingMetadata::readFromMetadata(const BSONElemen
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "TrackingMetadata element has incorrect type: expected"
- << mongo::Object
- << " but got "
- << metadataElem.type()};
+ << mongo::Object << " but got " << metadataElem.type()};
}
BSONObj metadataObj = metadataElem.Obj();
diff --git a/src/mongo/rpc/metadata/tracking_metadata_test.cpp b/src/mongo/rpc/metadata/tracking_metadata_test.cpp
index c2c2897a6f8..2244483dfb5 100644
--- a/src/mongo/rpc/metadata/tracking_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/tracking_metadata_test.cpp
@@ -52,9 +52,9 @@ const auto kParentOperId = "541b1a00e8a23afa832b2016";
TEST(TrackingMetadata, ReadFromMetadata) {
{
- auto metadata = checkParse(BSON(
- "tracking_info" << BSON("operId" << kOperId << "operName" << kOperName << "parentOperId"
- << kParentOperId)));
+ auto metadata =
+ checkParse(BSON("tracking_info" << BSON("operId" << kOperId << "operName" << kOperName
+ << "parentOperId" << kParentOperId)));
ASSERT_EQ(*metadata.getOperId(), kOperId);
ASSERT_EQ(*metadata.getParentOperId(), kParentOperId);
ASSERT_EQ(*metadata.getOperName(), kOperName);
@@ -78,8 +78,7 @@ TEST(TrackingMetadata, ReadFromInvalidMetadata) {
}
{
checkParseFails(BSON("tracking_info" << BSON("operId" << kOperId << "operName" << kOperName
- << "parentOperId"
- << 111)),
+ << "parentOperId" << 111)),
ErrorCodes::TypeMismatch);
}
}
diff --git a/src/mongo/rpc/metadata_test.cpp b/src/mongo/rpc/metadata_test.cpp
index f94802ac9f5..c0bac93aedd 100644
--- a/src/mongo/rpc/metadata_test.cpp
+++ b/src/mongo/rpc/metadata_test.cpp
@@ -72,8 +72,9 @@ TEST(Metadata, UpconvertValidMetadata) {
<< BSON("mode"
<< "secondary")),
mongo::QueryOption_SlaveOk,
- BSON("ping" << 1 << "$readPreference" << BSON("mode"
- << "secondary")));
+ BSON("ping" << 1 << "$readPreference"
+ << BSON("mode"
+ << "secondary")));
// Wrapped in 'query', with readPref.
checkUpconvert(BSON("query" << BSON("pong" << 1 << "foo"
@@ -121,16 +122,14 @@ TEST(Metadata, UpconvertInvalidMetadata) {
ASSERT_THROWS_CODE(upconvertRequest("db",
BSON("query" << BSON("foo"
<< "bar")
- << "$maxTimeMS"
- << 200),
+ << "$maxTimeMS" << 200),
0),
AssertionException,
ErrorCodes::InvalidOptions);
ASSERT_THROWS_CODE(upconvertRequest("db",
BSON("$query" << BSON("foo"
<< "bar")
- << "$maxTimeMS"
- << 200),
+ << "$maxTimeMS" << 200),
0),
AssertionException,
ErrorCodes::InvalidOptions);
diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp
index 4006db63bc1..52010604f53 100644
--- a/src/mongo/rpc/object_check_test.cpp
+++ b/src/mongo/rpc/object_check_test.cpp
@@ -88,4 +88,4 @@ TEST(DataTypeValidated, BSONValidationEnabled) {
ASSERT_OK(cdrc.readAndAdvanceNoThrow(&v));
}
}
-}
+} // namespace
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index 561264b5db7..7aa41d66cb1 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -181,7 +181,8 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
documents: [
{a: 1}
]
- })")).serialize();
+ })"))
+ .serialize();
// Round-trip command fails with NotMaster error. Note that this failure is in command
// dispatch which ignores w:0.
diff --git a/src/mongo/rpc/op_msg_test.cpp b/src/mongo/rpc/op_msg_test.cpp
index 67c27f79bd1..ac82f369a76 100644
--- a/src/mongo/rpc/op_msg_test.cpp
+++ b/src/mongo/rpc/op_msg_test.cpp
@@ -183,11 +183,13 @@ const uint32_t kNoFlags = 0;
const uint32_t kHaveChecksum = 1;
TEST_F(OpMsgParser, SucceedsWithJustBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 0u);
@@ -205,18 +207,20 @@ TEST_F(OpMsgParser, SucceedsWithChecksum) {
}
TEST_F(OpMsgParser, SucceedsWithBodyThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- fromjson("{a: 2}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ fromjson("{a: 2}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -227,17 +231,19 @@ TEST_F(OpMsgParser, SucceedsWithBodyThenSequence) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -247,22 +253,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenBody) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenBodyThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "empty", //
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
- kBodySection,
- fromjson("{ping: 1}"),
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -274,22 +282,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenBodyThenSequence) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenSequenceThenBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "empty", //
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -301,22 +311,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenSequenceThenBody) {
}
TEST_F(OpMsgParser, SucceedsWithBodyThenSequenceThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kDocSequenceSection,
- Sized{
- "empty", //
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -402,17 +414,19 @@ TEST_F(OpMsgParser, FailsIfDuplicateSequenceWithBodyNested) {
}
TEST_F(OpMsgParser, SucceedsIfSequenceAndBodyHaveCommonPrefix) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{cursor: {ns: 'foo.bar', id: 1}}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{cursor: {ns: 'foo.bar', id: 1}}"),
- kDocSequenceSection,
- Sized{
- "cursor.firstBatch", //
- fromjson("{_id: 1}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "cursor.firstBatch", //
+ fromjson("{_id: 1}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{cursor: {ns: 'foo.bar', id: 1}}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -432,11 +446,13 @@ TEST_F(OpMsgParser, FailsIfUnknownSectionKind) {
}
TEST_F(OpMsgParser, FailsIfBodyTooBig) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
- }.addToSize(-1); // Shrink message so body extends past end.
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
}
@@ -447,24 +463,27 @@ TEST_F(OpMsgParser, FailsIfBodyTooBigIntoChecksum) {
kHaveChecksum, //
kBodySection,
fromjson("{ping: 1}"),
- }.appendChecksum()
+ }
+ .appendChecksum()
.addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
}
TEST_F(OpMsgParser, FailsIfDocumentSequenceTooBig) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
- }.addToSize(-1); // Shrink message so body extends past end.
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
+ }
+ .addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
}
@@ -481,7 +500,8 @@ TEST_F(OpMsgParser, FailsIfDocumentSequenceTooBigIntoChecksum) {
"docs", //
fromjson("{a: 1}"),
},
- }.appendChecksum()
+ }
+ .appendChecksum()
.addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
@@ -497,7 +517,8 @@ TEST_F(OpMsgParser, FailsIfDocumentInSequenceTooBig) {
Sized{
"docs", //
fromjson("{a: 1}"),
- }.addToSize(-1), // Shrink sequence so document extends past end.
+ }
+ .addToSize(-1), // Shrink sequence so document extends past end.
};
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
@@ -512,7 +533,8 @@ TEST_F(OpMsgParser, FailsIfNameOfDocumentSequenceTooBig) {
kDocSequenceSection,
Sized{
"foo",
- }.addToSize(-1), // Shrink sequence so document extends past end.
+ }
+ .addToSize(-1), // Shrink sequence so document extends past end.
};
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
@@ -611,7 +633,8 @@ TEST_F(OpMsgParser, SucceedsWithUnknownOptionalFlags) {
flags, //
kBodySection,
fromjson("{ping: 1}"),
- }.parse();
+ }
+ .parse();
}
}
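
All of the op_msg_test.cpp churn is a single formatter behavior change: a member call chained onto a braced temporary now goes on its own line after the closing brace. A compilable miniature, with a toy BytesSketch type standing in for OpMsgBytes (an assumption; the real type assembles wire-format messages):

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <vector>

struct BytesSketch {
    std::vector<int> data;

    BytesSketch(std::initializer_list<int> init) : data(init) {}

    BytesSketch& addToSize(int delta) {
        data.push_back(delta);  // toy behavior; just enough to chain
        return *this;
    }

    std::size_t parse() const {
        return data.size();
    }
};

int main() {
    // The old style kept .addToSize/.parse on the closing-brace line;
    // the new style below matches the hunks above.
    auto n =
        BytesSketch{
            1,
            2,
            3,
        }
            .addToSize(-1)
            .parse();
    std::cout << n << '\n';  // prints 4: three entries plus the appended delta
}
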
diff --git a/src/mongo/rpc/protocol.cpp b/src/mongo/rpc/protocol.cpp
index a578d342b00..c021e79140a 100644
--- a/src/mongo/rpc/protocol.cpp
+++ b/src/mongo/rpc/protocol.cpp
@@ -140,10 +140,7 @@ StatusWith<ProtocolSetAndWireVersionInfo> parseProtocolSetFromIsMasterReply(
maxWireVersion >= std::numeric_limits<int>::max()) {
return Status(ErrorCodes::IncompatibleServerVersion,
str::stream() << "Server min and max wire version have invalid values ("
- << minWireVersion
- << ","
- << maxWireVersion
- << ")");
+ << minWireVersion << "," << maxWireVersion << ")");
}
WireVersionInfo version{static_cast<int>(minWireVersion), static_cast<int>(maxWireVersion)};
@@ -176,11 +173,9 @@ Status validateWireVersion(const WireVersionInfo client, const WireVersionInfo s
// Server may return bad data.
if (server.minWireVersion > server.maxWireVersion) {
return Status(ErrorCodes::IncompatibleServerVersion,
- str::stream() << "Server min and max wire version are incorrect ("
- << server.minWireVersion
- << ","
- << server.maxWireVersion
- << ")");
+ str::stream()
+ << "Server min and max wire version are incorrect ("
+ << server.minWireVersion << "," << server.maxWireVersion << ")");
}
// Determine if the [min, max] tuples overlap.
diff --git a/src/mongo/rpc/protocol.h b/src/mongo/rpc/protocol.h
index 33d19486fcf..f81fcaa542b 100644
--- a/src/mongo/rpc/protocol.h
+++ b/src/mongo/rpc/protocol.h
@@ -133,8 +133,8 @@ StatusWith<ProtocolSetAndWireVersionInfo> parseProtocolSetFromIsMasterReply(
const BSONObj& isMasterReply);
/**
- * Computes supported protocols from wire versions.
- */
+ * Computes supported protocols from wire versions.
+ */
ProtocolSet computeProtocolSet(const WireVersionInfo version);
} // namespace rpc
diff --git a/src/mongo/rpc/protocol_test.cpp b/src/mongo/rpc/protocol_test.cpp
index 8acb3d1d01d..61ca6e894f7 100644
--- a/src/mongo/rpc/protocol_test.cpp
+++ b/src/mongo/rpc/protocol_test.cpp
@@ -39,8 +39,8 @@ namespace {
using mongo::WireVersion;
using namespace mongo::rpc;
-using mongo::unittest::assertGet;
using mongo::BSONObj;
+using mongo::unittest::assertGet;
// Checks if negotiation of the first two protocol sets results in the 'proto'
const auto assert_negotiated = [](ProtocolSet fst, ProtocolSet snd, Protocol proto) {
@@ -105,8 +105,7 @@ TEST(Protocol, parseProtocolSetFromIsMasterReply) {
auto mongos32 =
BSON("maxWireVersion" << static_cast<int>(WireVersion::COMMANDS_ACCEPT_WRITE_CONCERN)
<< "minWireVersion"
- << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE)
- << "msg"
+ << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE) << "msg"
<< "isdbgrid");
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongos32)).protocolSet,
@@ -114,8 +113,8 @@ TEST(Protocol, parseProtocolSetFromIsMasterReply) {
}
{
// MongoDB 3.0 (mongod)
- auto mongod30 = BSON(
- "maxWireVersion" << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
+ auto mongod30 = BSON("maxWireVersion"
+ << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
<< static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE));
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongod30)).protocolSet,
supports::kOpQueryOnly);
diff --git a/src/mongo/rpc/write_concern_error_detail.cpp b/src/mongo/rpc/write_concern_error_detail.cpp
index aa134a9f1a0..477c7011430 100644
--- a/src/mongo/rpc/write_concern_error_detail.cpp
+++ b/src/mongo/rpc/write_concern_error_detail.cpp
@@ -137,8 +137,8 @@ Status WriteConcernErrorDetail::toStatus() const {
return _status;
}
- return _status.withReason(
- str::stream() << _status.reason() << "; Error details: " << _errInfo.toString());
+ return _status.withReason(str::stream()
+ << _status.reason() << "; Error details: " << _errInfo.toString());
}
void WriteConcernErrorDetail::setErrInfo(const BSONObj& errInfo) {
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 0d14cf81015..5d6f0b21ccb 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -186,7 +186,7 @@ auto AsyncRequestsSender::RemoteData::scheduleRemoteCommand(std::vector<HostAndP
// We have to make a promise/future pair because the TaskExecutor doesn't currently support a
// future-returning variant of scheduleRemoteCommand
- auto[p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
+ auto [p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
// Failures to schedule skip the retry loop
uassertStatusOK(_ars->_subExecutor->scheduleRemoteCommandOnAny(
@@ -242,8 +242,9 @@ auto AsyncRequestsSender::RemoteData::handleResponse(RemoteCommandOnAnyCallbackA
_retryCount < kMaxNumFailedHostRetryAttempts) {
LOG(1) << "Command to remote " << _shardId
- << (failedTargets.empty() ? " " : (failedTargets.size() > 1 ? " for hosts "
- : " at host "))
+ << (failedTargets.empty()
+ ? " "
+ : (failedTargets.size() > 1 ? " for hosts " : " at host "))
<< "{}"_format(fmt::join(failedTargets, ", "))
<< "failed with retriable error and will be retried "
<< causedBy(redact(status));
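
Alongside the message reflow, this hunk adds the space the formatter now puts between auto and a structured binding: auto [p, f]. The binding unpacks a promise/future pair; a sketch with the standard library, where makePromiseFutureSketch is a stand-in for MongoDB's makePromiseFuture helper:

#include <future>
#include <iostream>
#include <utility>

// Stand-in for makePromiseFuture<T>(): returns a connected promise/future pair.
template <typename T>
std::pair<std::promise<T>, std::future<T>> makePromiseFutureSketch() {
    std::promise<T> p;
    std::future<T> f = p.get_future();
    return {std::move(p), std::move(f)};
}

int main() {
    auto [p, f] = makePromiseFutureSketch<int>();  // note the space after 'auto'
    p.set_value(42);
    std::cout << f.get() << '\n';  // prints 42
}
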
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index b456aa29039..2081f9ec8b3 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -310,8 +310,7 @@ TEST(BalancerSettingsType, InvalidBalancingWindowTimeFormat) {
ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
<< "23:00"
- << "stop"
- << 6LL)))
+ << "stop" << 6LL)))
.getStatus());
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 7a4b6e1564a..d8574212532 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -93,8 +93,7 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse
return {ErrorCodes::UnsupportedFormat,
str::stream() << "expected an object from the findAndModify response '"
<< kFindAndModifyResponseResultDocField
- << "'field, got: "
- << newDocElem};
+ << "'field, got: " << newDocElem};
}
return newDocElem.Obj().getOwned();
@@ -220,14 +219,10 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* opCtx,
Date_t time,
StringData why,
const WriteConcernOptions& writeConcern) {
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS,
@@ -281,14 +276,10 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
BSON(LocksType::name() << lockID << LocksType::state(LocksType::UNLOCKED)));
orQueryBuilder.append(BSON(LocksType::name() << lockID << LocksType::lockID(currentHolderTS)));
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS, BSON("$or" << orQueryBuilder.arr()), BSON("$set" << newLockDetails));
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index f2eca5abcf7..5dae286da5a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -53,14 +53,8 @@ void noGrabLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "grabLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", who: " << who << ", processId: " << processId
+ << ", why: " << why);
}
void noOvertakeLockFuncSet(StringData lockID,
@@ -71,22 +65,13 @@ void noOvertakeLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "overtakeLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", currentHolderTS: "
- << currentHolderTS
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", currentHolderTS: " << currentHolderTS
+ << ", who: " << who << ", processId: " << processId << ", why: " << why);
}
void noUnLockFuncSet(const OID& lockSessionID) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -95,26 +80,22 @@ void noPingFuncSet(StringData processID, Date_t ping) {
void noStopPingFuncSet(StringData processID) {
FAIL(str::stream() << "stopPing not expected to be called. "
- << "processID: "
- << processID);
+ << "processID: " << processID);
}
void noGetLockByTSSet(const OID& lockSessionID) {
FAIL(str::stream() << "getLockByTS not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noGetLockByNameSet(StringData name) {
FAIL(str::stream() << "getLockByName not expected to be called. "
- << "lockName: "
- << name);
+ << "lockName: " << name);
}
void noGetPingSet(StringData processId) {
FAIL(str::stream() << "getPing not expected to be called. "
- << "lockName: "
- << processId);
+ << "lockName: " << processId);
}
void noGetServerInfoSet() {
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index 4fd6562cdc5..faae634b09e 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -220,4 +220,4 @@ private:
GetServerInfoFunc _getServerInfoChecker;
StatusWith<DistLockCatalog::ServerInfo> _getServerInfoReturnValue;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index ceb0611669b..6a17de30fad 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -45,12 +45,8 @@ namespace {
void NoLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor) {
FAIL(str::stream() << "Lock not expected to be called. "
- << "Name: "
- << name
- << ", whyMessage: "
- << whyMessage
- << ", waitFor: "
- << waitFor);
+ << "Name: " << name << ", whyMessage: " << whyMessage
+ << ", waitFor: " << waitFor);
}
} // namespace
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.cpp b/src/mongo/s/catalog/dist_lock_ping_info.cpp
index c0643c1fa12..2549e55bb19 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.cpp
+++ b/src/mongo/s/catalog/dist_lock_ping_info.cpp
@@ -42,4 +42,4 @@ DistLockPingInfo::DistLockPingInfo(
configLocalTime(remoteArg),
lockSessionId(std::move(tsArg)),
electionId(std::move(electionIdArg)) {}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.h b/src/mongo/s/catalog/dist_lock_ping_info.h
index e3db046db20..6e236fb5133 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.h
+++ b/src/mongo/s/catalog/dist_lock_ping_info.h
@@ -64,4 +64,4 @@ struct DistLockPingInfo {
// Note: unused by legacy dist lock.
OID electionId;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.cpp b/src/mongo/s/catalog/mongo_version_range.cpp
index c92fcb0b749..a0c5f505817 100644
--- a/src/mongo/s/catalog/mongo_version_range.cpp
+++ b/src/mongo/s/catalog/mongo_version_range.cpp
@@ -148,4 +148,4 @@ bool isInMongoVersionRanges(StringData version, const vector<MongoVersionRange>&
return false;
}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.h b/src/mongo/s/catalog/mongo_version_range.h
index 5e8d79807a7..f995864a689 100644
--- a/src/mongo/s/catalog/mongo_version_range.h
+++ b/src/mongo/s/catalog/mongo_version_range.h
@@ -60,4 +60,4 @@ struct MongoVersionRange {
};
bool isInMongoVersionRanges(StringData version, const std::vector<MongoVersionRange>& ranges);
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 23db44c5453..2084959330e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -260,14 +260,14 @@ StatusWith<repl::OpTimeWith<std::vector<DatabaseType>>> ShardingCatalogClientImp
for (const BSONObj& doc : findStatus.getValue().value) {
auto dbRes = DatabaseType::fromBSON(doc);
if (!dbRes.isOK()) {
- return dbRes.getStatus().withContext(stream() << "Failed to parse database document "
- << doc);
+ return dbRes.getStatus().withContext(stream()
+ << "Failed to parse database document " << doc);
}
Status validateStatus = dbRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate database document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate database document " << doc);
}
databases.push_back(dbRes.getValue());
@@ -377,9 +377,7 @@ StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollection
if (!collectionResult.isOK()) {
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS.ns()
- << " document: "
- << obj
- << " : "
+ << " document: " << obj << " : "
<< collectionResult.getStatus().toString()};
}
@@ -591,14 +589,14 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
for (const BSONObj& doc : findStatus.getValue().value) {
auto shardRes = ShardType::fromBSON(doc);
if (!shardRes.isOK()) {
- return shardRes.getStatus().withContext(stream() << "Failed to parse shard document "
- << doc);
+ return shardRes.getStatus().withContext(stream()
+ << "Failed to parse shard document " << doc);
}
Status validateStatus = shardRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate shard document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate shard document " << doc);
}
shards.push_back(shardRes.getValue());
@@ -714,9 +712,9 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
(readConcern == repl::ReadConcernLevel::kMajorityReadConcern &&
writeConcern.wMode == WriteConcernOptions::kMajority));
- BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj cmd =
+ BSON("applyOps" << updateOps << "preCondition" << preCondition
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto response =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
@@ -773,11 +771,11 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
const auto& newestChunk = chunkWithStatus.getValue();
if (newestChunk.empty()) {
- errMsg = str::stream() << "chunk operation commit failed: version "
- << lastChunkVersion.toString()
- << " doesn't exist in namespace: " << nss.ns()
- << ". Unable to save chunk ops. Command: " << cmd
- << ". Result: " << response.getValue().response;
+ errMsg = str::stream()
+ << "chunk operation commit failed: version " << lastChunkVersion.toString()
+ << " doesn't exist in namespace: " << nss.ns()
+ << ". Unable to save chunk ops. Command: " << cmd
+ << ". Result: " << response.getValue().response;
return status.withContext(errMsg);
};
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index ce7790031e3..0885054f185 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -67,8 +67,8 @@ using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using executor::TaskExecutor;
-using rpc::ReplSetMetadata;
using repl::OpTime;
+using rpc::ReplSetMetadata;
using std::vector;
using unittest::assertGet;
@@ -101,7 +101,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
onFindWithMetadataCommand(
[this, &expectedColl, newOpTime](const RemoteCommandRequest& request) {
-
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
@@ -599,10 +598,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandSuccess) {
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -622,14 +619,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 2)),
- &responseBuilder);
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -648,22 +645,23 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
},
Status::OK());
- auto future = launchAsync([this] {
- BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 1 << "wtimeout"
- << 30)),
- &responseBuilder);
- ASSERT_FALSE(ok);
-
- Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
- ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
- });
+ auto future =
+ launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(
+ operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
@@ -672,10 +670,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 30)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 30)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -763,10 +759,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -801,7 +795,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, newOpTime] {
-
OpTime opTime;
const auto& collections =
assertGet(catalogClient()->getCollections(operationContext(), nullptr, &opTime));
@@ -1202,8 +1195,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_BSONOBJ_EQ(BSON("w"
<< "majority"
- << "wtimeout"
- << 60000),
+ << "wtimeout" << 60000),
request.cmdObj["writeConcern"].Obj());
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index b2a2b522299..3142901d06a 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -46,12 +46,10 @@ TEST(ChangeLogType, Empty) {
TEST(ChangeLogType, Valid) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -77,8 +75,7 @@ TEST(ChangeLogType, MissingChangeId) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -91,8 +88,7 @@ TEST(ChangeLogType, MissingServer) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -102,11 +98,9 @@ TEST(ChangeLogType, MissingServer) {
TEST(ChangeLogType, MissingClientAddr) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -116,11 +110,9 @@ TEST(ChangeLogType, MissingClientAddr) {
TEST(ChangeLogType, MissingTime) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -130,8 +122,7 @@ TEST(ChangeLogType, MissingTime) {
TEST(ChangeLogType, MissingWhat) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::ns("test.test")
@@ -143,14 +134,13 @@ TEST(ChangeLogType, MissingWhat) {
}
TEST(ChangeLogType, MissingNS) {
- BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj =
+ BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_OK(changeLogResult.getStatus());
@@ -170,12 +160,10 @@ TEST(ChangeLogType, MissingNS) {
TEST(ChangeLogType, MissingDetails) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test"));
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test"));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
@@ -186,8 +174,7 @@ TEST(ChangeLogType, MissingShard) {
<< ChangeLogType::server("host.local")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index c627bd7c824..fbe61500c82 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -68,8 +68,8 @@ const char kMaxKey[] = "max";
Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONElement* bsonElement) {
Status elementStatus = bsonExtractTypedField(obj, fieldName, Object, bsonElement);
if (!elementStatus.isOK()) {
- return elementStatus.withContext(str::stream() << "The field '" << fieldName
- << "' cannot be parsed");
+ return elementStatus.withContext(str::stream()
+ << "The field '" << fieldName << "' cannot be parsed");
}
if (bsonElement->Obj().isEmpty()) {
@@ -108,8 +108,8 @@ StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
return ChunkRange(minKey.Obj().getOwned(), maxKey.Obj().getOwned());
@@ -135,8 +135,7 @@ const Status ChunkRange::extractKeyPattern(KeyPattern* shardKeyPatternOut) const
(!min.more() && max.more())) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "the shard key of min " << _minKey << " doesn't match with "
- << "the shard key of max "
- << _maxKey};
+ << "the shard key of max " << _maxKey};
}
b.append(x.fieldName(), 1);
}
@@ -348,8 +347,8 @@ StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID&
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
chunk._min = minKey.Obj().getOwned();
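The hunks above only rewrap the message construction; the underlying pattern, a str::stream() temporary that converts implicitly to std::string plus a Status::withContext that prefixes it, looks like this in isolation (inputs hypothetical, mirroring extractObject):

    BSONElement el;
    BSONObj obj = BSON("min" << 1);  // wrong type on purpose: "min" should be an Object
    Status s = bsonExtractTypedField(obj, "min", Object, &el);
    if (!s.isOK()) {
        // withContext returns a new Status with the extra text prefixed to the reason.
        return s.withContext(str::stream() << "The field 'min' cannot be parsed");
    }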
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 3e51c306bd5..3c424f815cc 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -50,41 +50,32 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name(OID::gen().toString()) << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol") << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch());
chunkRes = ChunkType::fromConfigBSON(objModShard);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
- BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModVersion);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -100,8 +91,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
- BSONObj objModMax = BSON(
- ChunkType::minShardID(kMin) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
+ BSONObj objModMax = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
chunkRes = ChunkType::fromShardBSON(objModMax, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
@@ -112,8 +103,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
- BSONObj objModLastmod = BSON(
- ChunkType::minShardID(kMin) << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
+ BSONObj objModLastmod = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
}
@@ -123,10 +114,9 @@ TEST(ChunkType, ToFromShardBSON) {
ChunkVersion chunkVersion(1, 2, epoch);
auto lastmod = Timestamp(chunkVersion.toLong());
- BSONObj obj = BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax)
- << ChunkType::shard(kShard.toString())
- << "lastmod"
- << lastmod);
+ BSONObj obj = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
+ << lastmod);
ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -140,14 +130,10 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -155,14 +141,11 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("b" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -170,14 +153,11 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name(OID::gen().toString()) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 20))
- << ChunkType::max(BSON("a" << 10))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen().toString())
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
+ << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
@@ -185,14 +165,11 @@ TEST(ChunkType, MinToMaxNotAscending) {
TEST(ChunkType, ToFromConfigBSON) {
const std::string chunkID = OID::gen().toString();
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name(chunkID) << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001")
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSONObj obj =
+ BSON(ChunkType::name(chunkID)
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
+ << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch());
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index 68c1e73b096..9130562aaac 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -48,14 +48,13 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -76,18 +75,14 @@ TEST(CollectionType, Basic) {
TEST(CollectionType, AllFieldsPresent) {
const OID oid = OID::gen();
const auto uuid = UUID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)
- << CollectionType::uuid()
- << uuid
- << "isAssignedShardKey"
- << false));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true) << CollectionType::uuid() << uuid << "isAssignedShardKey"
+ << false));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -109,24 +104,20 @@ TEST(CollectionType, AllFieldsPresent) {
TEST(CollectionType, EmptyDefaultCollationFailsToParse) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSONObj())
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::defaultCollation(BSONObj())
+ << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
TEST(CollectionType, MissingDefaultCollationParses) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -136,14 +127,13 @@ TEST(CollectionType, MissingDefaultCollationParses) {
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -156,12 +146,10 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -194,16 +182,11 @@ TEST(CollectionType, EpochCorrectness) {
}
TEST(CollectionType, Pre22Format) {
- CollectionType coll = assertGet(CollectionType::fromBSON(BSON("_id"
- << "db.coll"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "dropped"
- << false
- << "key"
- << BSON("a" << 1)
- << "unique"
- << false)));
+ CollectionType coll = assertGet(
+ CollectionType::fromBSON(BSON("_id"
+ << "db.coll"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped"
+ << false << "key" << BSON("a" << 1) << "unique" << false)));
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
@@ -216,12 +199,10 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
- StatusWith<CollectionType> result =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("foo\\bar.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("foo\\bar.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(result.isOK());
CollectionType collType = result.getValue();
ASSERT_FALSE(collType.validate().isOK());
@@ -230,10 +211,10 @@ TEST(CollectionType, InvalidCollectionNamespace) {
TEST(CollectionType, BadType) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
- BSON(CollectionType::fullNs() << 1 << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ BSON(CollectionType::fullNs()
+ << 1 << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index df3a9486f64..6bc2d7eaac2 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -254,10 +254,10 @@ TEST(Excludes, BadRangeArray) {
<< "1.2.3"); // empty bound
BSONArray includeArr = bab.arr();
- auto versionInfoResult = VersionType::fromBSON(BSON(
- VersionType::minCompatibleVersion(3) << VersionType::currentVersion(4)
- << VersionType::clusterId(OID::gen())
- << VersionType::excludingMongoVersions(includeArr)));
+ auto versionInfoResult = VersionType::fromBSON(
+ BSON(VersionType::minCompatibleVersion(3)
+ << VersionType::currentVersion(4) << VersionType::clusterId(OID::gen())
+ << VersionType::excludingMongoVersions(includeArr)));
ASSERT_EQ(ErrorCodes::FailedToParse, versionInfoResult.getStatus());
}
diff --git a/src/mongo/s/catalog/type_database.cpp b/src/mongo/s/catalog/type_database.cpp
index 5dbeb34ab7d..2caf60f308f 100644
--- a/src/mongo/s/catalog/type_database.cpp
+++ b/src/mongo/s/catalog/type_database.cpp
@@ -83,10 +83,10 @@ StatusWith<DatabaseType> DatabaseType::fromBSON(const BSONObj& source) {
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the config server being in binary version 4.2 "
- "or later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the config server being in binary version 4.2 "
+ "or later."};
}
dbtVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
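Note the reflowed message ends with two adjacent string literals; C++ concatenates these at translation time, so a long sentence can wrap without an extra << operator. A self-contained sketch:

    const char msg[] = " despite the config server being in binary version 4.2 "
                       "or later.";  // one literal after preprocessing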
diff --git a/src/mongo/s/catalog/type_database_test.cpp b/src/mongo/s/catalog/type_database_test.cpp
index e4e4b046232..8a9eb73dcda 100644
--- a/src/mongo/s/catalog/type_database_test.cpp
+++ b/src/mongo/s/catalog/type_database_test.cpp
@@ -49,8 +49,7 @@ TEST(DatabaseType, Basic) {
UUID uuid = UUID::gen();
StatusWith<DatabaseType> status = DatabaseType::fromBSON(
BSON(DatabaseType::name("mydb")
- << DatabaseType::primary("shard")
- << DatabaseType::sharded(true)
+ << DatabaseType::primary("shard") << DatabaseType::sharded(true)
<< DatabaseType::version(BSON("uuid" << uuid << "lastMod" << 0))));
ASSERT_TRUE(status.isOK());
diff --git a/src/mongo/s/catalog/type_locks_test.cpp b/src/mongo/s/catalog/type_locks_test.cpp
index b00ffe06c0e..b249bb648a9 100644
--- a/src/mongo/s/catalog/type_locks_test.cpp
+++ b/src/mongo/s/catalog/type_locks_test.cpp
@@ -46,12 +46,12 @@ TEST(Validity, Empty) {
TEST(Validity, UnlockedWithOptional) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::UNLOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::UNLOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -78,12 +78,12 @@ TEST(Validity, UnlockedWithoutOptional) {
TEST(Validity, LockedValid) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -98,11 +98,11 @@ TEST(Validity, LockedValid) {
}
TEST(Validity, LockedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -124,11 +124,10 @@ TEST(Validity, LockedMissingLockID) {
}
TEST(Validity, LockedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen()) << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -137,11 +136,11 @@ TEST(Validity, LockedMissingWho) {
}
TEST(Validity, LockedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -150,12 +149,12 @@ TEST(Validity, LockedMissingWhy) {
}
TEST(Validity, ContestedValid) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -164,11 +163,11 @@ TEST(Validity, ContestedValid) {
}
TEST(Validity, ContestedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -190,11 +189,10 @@ TEST(Validity, ContestedMissingLockID) {
}
TEST(Validity, ContestedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -203,11 +201,11 @@ TEST(Validity, ContestedMissingWho) {
}
TEST(Validity, ContestedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp
index a253ed68d00..7007305f412 100644
--- a/src/mongo/s/catalog/type_mongos_test.cpp
+++ b/src/mongo/s/catalog/type_mongos_test.cpp
@@ -41,10 +41,8 @@ using namespace mongo;
TEST(Validity, MissingName) {
BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -53,10 +51,8 @@ TEST(Validity, MissingName) {
TEST(Validity, MissingPing) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -64,36 +60,33 @@ TEST(Validity, MissingPing) {
}
TEST(Validity, MissingUp) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -107,12 +100,11 @@ TEST(Validity, MissingMongoVersion) {
}
TEST(Validity, MissingConfigVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::mongoVersion("x.x.x")
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -128,10 +120,8 @@ TEST(Validity, MissingConfigVersion) {
TEST(Validity, MissingAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0));
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -144,10 +134,8 @@ TEST(Validity, MissingAdvisoryHostFQDNs) {
TEST(Validity, EmptyAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -162,10 +150,8 @@ TEST(Validity, EmptyAdvisoryHostFQDNs) {
TEST(Validity, BadTypeAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo" << 0 << "baz")));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -175,10 +161,8 @@ TEST(Validity, BadTypeAdvisoryHostFQDNs) {
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo"
<< "bar"
<< "baz")));
diff --git a/src/mongo/s/catalog/type_shard_collection.h b/src/mongo/s/catalog/type_shard_collection.h
index 95ec62e7775..af96e3d7785 100644
--- a/src/mongo/s/catalog/type_shard_collection.h
+++ b/src/mongo/s/catalog/type_shard_collection.h
@@ -47,24 +47,24 @@ public:
using ShardCollectionTypeBase::kUuidFieldName;
// Make getters and setters accessible.
+ using ShardCollectionTypeBase::getDefaultCollation;
+ using ShardCollectionTypeBase::getEnterCriticalSectionCounter;
+ using ShardCollectionTypeBase::getEpoch;
+ using ShardCollectionTypeBase::getKeyPattern;
+ using ShardCollectionTypeBase::getLastRefreshedCollectionVersion;
using ShardCollectionTypeBase::getNss;
- using ShardCollectionTypeBase::setNss;
+ using ShardCollectionTypeBase::getRefreshing;
+ using ShardCollectionTypeBase::getUnique;
using ShardCollectionTypeBase::getUuid;
- using ShardCollectionTypeBase::setUuid;
- using ShardCollectionTypeBase::getEpoch;
+ using ShardCollectionTypeBase::setDefaultCollation;
+ using ShardCollectionTypeBase::setEnterCriticalSectionCounter;
using ShardCollectionTypeBase::setEpoch;
- using ShardCollectionTypeBase::getKeyPattern;
using ShardCollectionTypeBase::setKeyPattern;
- using ShardCollectionTypeBase::getDefaultCollation;
- using ShardCollectionTypeBase::setDefaultCollation;
- using ShardCollectionTypeBase::getUnique;
- using ShardCollectionTypeBase::setUnique;
- using ShardCollectionTypeBase::getRefreshing;
- using ShardCollectionTypeBase::setRefreshing;
- using ShardCollectionTypeBase::getLastRefreshedCollectionVersion;
using ShardCollectionTypeBase::setLastRefreshedCollectionVersion;
- using ShardCollectionTypeBase::getEnterCriticalSectionCounter;
- using ShardCollectionTypeBase::setEnterCriticalSectionCounter;
+ using ShardCollectionTypeBase::setNss;
+ using ShardCollectionTypeBase::setRefreshing;
+ using ShardCollectionTypeBase::setUnique;
+ using ShardCollectionTypeBase::setUuid;
ShardCollectionType() : ShardCollectionTypeBase() {}
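This hunk is a pure reordering, consistent with clang-format's SortUsingDeclarations option: the using-declarations become lexicographic, so getters and setters interleave instead of sitting in get/set pairs. A sketch with hypothetical names:

    using Base::getEpoch;  // sorted alphabetically...
    using Base::getNss;
    using Base::setEpoch;  // ...so setters no longer follow their getters
    using Base::setNss;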
diff --git a/src/mongo/s/catalog/type_shard_database.cpp b/src/mongo/s/catalog/type_shard_database.cpp
index 268460023e2..059516dc3ac 100644
--- a/src/mongo/s/catalog/type_shard_database.cpp
+++ b/src/mongo/s/catalog/type_shard_database.cpp
@@ -65,10 +65,10 @@ StatusWith<ShardDatabaseType> ShardDatabaseType::fromBSON(const BSONObj& source)
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the shard being in binary version 4.2 or "
- "later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the shard being in binary version 4.2 or "
+ "later."};
}
dbVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index b39725e9c0f..d2c9ab0326e 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -62,9 +62,9 @@ TEST(ShardType, OnlyMandatory) {
}
TEST(ShardType, AllOptionalsPresent) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::draining(true)
- << ShardType::maxSizeMB(100));
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::draining(true)
+ << ShardType::maxSizeMB(100));
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
@@ -72,9 +72,8 @@ TEST(ShardType, AllOptionalsPresent) {
}
TEST(ShardType, MaxSizeAsFloat) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::maxSizeMB()
- << 100.0);
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::maxSizeMB() << 100.0);
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index f466fc234ae..1cd8ed6d276 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -58,8 +58,8 @@ TEST(TagsType, Valid) {
}
TEST(TagsType, MissingNsField) {
- BSONObj obj = BSON(TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::tag("tag")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -67,8 +67,8 @@ TEST(TagsType, MissingNsField) {
}
TEST(TagsType, MissingTagField) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -94,9 +94,9 @@ TEST(TagsType, MissingMaxKey) {
}
TEST(TagsType, KeysWithDifferentNumberOfColumns) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::tag("tag")
- << TagsType::min(BSON("a" << 10 << "b" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::tag("tag") << TagsType::min(BSON("a" << 10 << "b" << 10))
+ << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
const TagsType& tag = status.getValue();
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index e4a151f9ccc..c2206848332 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -373,8 +373,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
- << ", no entry for database "
- << nss.db(),
+ << ", no entry for database " << nss.db(),
itDb != _collectionsByDb.end());
auto itColl = itDb->second.find(nss.ns());
@@ -392,8 +391,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
auto foundVersion = itColl->second->routingInfo->getVersion();
uassert(StaleConfigInfo(nss, targetCollectionVersion, foundVersion),
str::stream() << "could not act as router for " << nss.ns() << ", wanted "
- << targetCollectionVersion.toString()
- << ", but found "
+ << targetCollectionVersion.toString() << ", but found "
<< foundVersion.toString(),
foundVersion.epoch() == targetCollectionVersion.epoch());
}
@@ -467,8 +465,8 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const std::string& dbName,
std::shared_ptr<DatabaseInfoEntry> dbEntry) {
- const auto onRefreshCompleted =
- [ this, t = Timer(), dbName, dbEntry ](const StatusWith<DatabaseType>& swDbt) {
+ const auto onRefreshCompleted = [this, t = Timer(), dbName, dbEntry](
+ const StatusWith<DatabaseType>& swDbt) {
// TODO (SERVER-34164): Track and increment stats for database refreshes.
if (!swDbt.isOK()) {
LOG_CATALOG_REFRESH(0) << "Refresh for database " << dbName << " took " << t.millis()
@@ -556,8 +554,9 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
}
// Invoked when one iteration of getChunksSince has completed, whether with success or error
- const auto onRefreshCompleted = [ this, t = Timer(), nss, isIncremental, existingRoutingInfo ](
- const Status& status, RoutingTableHistory* routingInfoAfterRefresh) {
+ const auto onRefreshCompleted = [this, t = Timer(), nss, isIncremental, existingRoutingInfo](
+ const Status& status,
+ RoutingTableHistory* routingInfoAfterRefresh) {
if (isIncremental) {
_stats.numActiveIncrementalRefreshes.subtractAndFetch(1);
} else {
@@ -570,9 +569,10 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
LOG_CATALOG_REFRESH(0) << "Refresh for collection " << nss << " took " << t.millis()
<< " ms and failed" << causedBy(redact(status));
} else if (routingInfoAfterRefresh) {
- const int logLevel = (!existingRoutingInfo || (existingRoutingInfo &&
- routingInfoAfterRefresh->getVersion() !=
- existingRoutingInfo->getVersion()))
+ const int logLevel =
+ (!existingRoutingInfo ||
+ (existingRoutingInfo &&
+ routingInfoAfterRefresh->getVersion() != existingRoutingInfo->getVersion()))
? 0
: 1;
LOG_CATALOG_REFRESH(logLevel)
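The onRefreshCompleted hunks show the other major formatting change in this patch: init-captures now stay inside unspaced brackets and the parameter list wraps as a unit after them. A self-contained, compilable sketch of the layout (names hypothetical):

    #include <string>

    int main() {
        int attempt = 0;
        const auto onRefreshCompleted = [&attempt, label = std::string("db")](
                                            const std::string& status) {
            ++attempt;  // runs once per refresh completion
            return label + ": " + status;
        };
        return onRefreshCompleted("ok") == "db: ok" ? 0 : 1;
    }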
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 6524460ad41..647742c3408 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -88,8 +88,7 @@ void ChunkInfo::throwIfMovedSince(const Timestamp& ts) const {
uasserted(ErrorCodes::MigrationConflict,
str::stream() << "Chunk has moved since timestamp: " << ts.toString()
- << ", most recently at timestamp: "
- << latestValidAfter.toString());
+ << ", most recently at timestamp: " << latestValidAfter.toString());
}
bool ChunkInfo::containsKey(const BSONObj& shardKey) const {
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index f2147267cba..acf4df0ede8 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -454,15 +454,13 @@ ShardVersionMap RoutingTableHistory::_constructShardVersionMap() const {
str::stream()
<< "Gap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
else
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream()
<< "Overlap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
}
if (!firstMin)
diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp
index c551d06a29a..1e9dfeb858a 100644
--- a/src/mongo/s/chunk_manager_index_bounds_test.cpp
+++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp
@@ -320,8 +320,7 @@ TEST_F(CMCollapseTreeTest, Regex) {
OrderedIntervalList expected;
expected.intervals.push_back(Interval(BSON(""
<< ""
- << ""
- << BSONObj()),
+ << "" << BSONObj()),
true,
false));
BSONObjBuilder builder;
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 3320e4517b7..c532d4b5a23 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
@@ -566,10 +566,11 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// shard or if we keep better track of chunks, we can actually add the skip
// value into the cursor and/or make some assumptions about the return value
// size ( (batch size + skip amount) / num_servers ).
- _qSpec.ntoreturn() == 0 ? 0 : (_qSpec.ntoreturn() > 0
- ? _qSpec.ntoreturn() + _qSpec.ntoskip()
- : _qSpec.ntoreturn() -
- _qSpec.ntoskip()))); // batchSize
+ _qSpec.ntoreturn() == 0
+ ? 0
+ : (_qSpec.ntoreturn() > 0
+ ? _qSpec.ntoreturn() + _qSpec.ntoskip()
+ : _qSpec.ntoreturn() - _qSpec.ntoskip()))); // batchSize
} else {
// Single shard query
@@ -598,9 +599,9 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// Without full initialization, throw an exception
uassert(15987,
- str::stream() << "could not fully initialize cursor on shard " << shardId
- << ", current connection state is "
- << mdata.toBSON().toString(),
+ str::stream()
+ << "could not fully initialize cursor on shard " << shardId
+ << ", current connection state is " << mdata.toBSON().toString(),
success);
mdata.retryNext = false;
@@ -993,8 +994,7 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
// Version is zero b/c this is deprecated codepath
staleConfigExs.push_back(str::stream() << "stale config detected for " << _ns
- << " in ParallelCursor::_init "
- << errLoc);
+ << " in ParallelCursor::_init " << errLoc);
break;
}
@@ -1056,8 +1056,8 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
_cursors[i].reset(nullptr, nullptr);
if (!retry) {
- socketExs.push_back(str::stream() << "error querying server: "
- << servers[i]);
+ socketExs.push_back(str::stream()
+ << "error querying server: " << servers[i]);
conns[i]->done();
} else {
retryQueries.insert(i);
@@ -1277,12 +1277,7 @@ void ParallelConnectionMetadata::cleanup(bool full) {
BSONObj ParallelConnectionMetadata::toBSON() const {
return BSON("state" << (pcState ? pcState->toBSON() : BSONObj()) << "retryNext" << retryNext
- << "init"
- << initialized
- << "finish"
- << finished
- << "errored"
- << errored);
+ << "init" << initialized << "finish" << finished << "errored" << errored);
}
std::string ParallelConnectionState::toString() const {
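The batchSize ternary re-indented above encodes a sign convention: ntoreturn == 0 means no limit, positive means a soft limit (so the skip amount is added), and negative means a hard limit (so the skip amount is subtracted to preserve the sign). Extracted as a plain function:

    int effectiveBatchSize(int ntoreturn, int ntoskip) {
        if (ntoreturn == 0)
            return 0;                                 // no limit requested
        return ntoreturn > 0 ? ntoreturn + ntoskip    // soft limit: limit + skip
                             : ntoreturn - ntoskip;   // hard limit: stays negative
    }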
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 69db3fdbc87..44a2c48c43b 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -205,9 +205,9 @@ public:
const BSONObj& cmdObj) = 0;
/**
- * Runs a cursor command, exhausts the cursor, and pulls all data into memory. Performs retries
- * if the command fails in accordance with the kIdempotent RetryPolicy.
- */
+ * Runs a cursor command, exhausts the cursor, and pulls all data into memory. If the
+ * command fails, performs retries in accordance with the kIdempotent RetryPolicy.

+ */
StatusWith<QueryResponse> runExhaustiveCursorCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
@@ -225,13 +225,13 @@ public:
RetryPolicy retryPolicy);
/**
- * Warning: This method exhausts the cursor and pulls all data into memory.
- * Do not use other than for very small (i.e., admin or metadata) collections.
- * Performs retries if the query fails in accordance with the kIdempotent RetryPolicy.
- *
- * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
- * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
- */
+ * Warning: This method exhausts the cursor and pulls all data into memory.
+ * Do not use it except for very small (e.g., admin or metadata) collections.
+ * If the query fails, performs retries in accordance with the kIdempotent RetryPolicy.
+ *
+ * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
+ * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
+ */
StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 7187eb01998..a5deb5da0f3 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -65,17 +65,17 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using executor::NetworkInterface;
using executor::NetworkInterfaceThreadPool;
+using executor::TaskExecutor;
using executor::TaskExecutorPool;
using executor::ThreadPoolTaskExecutor;
-using executor::TaskExecutor;
using CallbackArgs = TaskExecutor::CallbackArgs;
using CallbackHandle = TaskExecutor::CallbackHandle;
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 362160babec..8602c3d31d2 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -261,7 +261,6 @@ StatusWith<Shard::QueryResponse> ShardRemote::_runExhaustiveCursorCommand(
auto fetcherCallback = [&status, &response](const Fetcher::QueryResponseStatus& dataStatus,
Fetcher::NextAction* nextAction,
BSONObjBuilder* getMoreBob) {
-
// Throw out any accumulated results on error
if (!dataStatus.isOK()) {
status = dataStatus.getStatus();
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index e58ec0a8809..3b19fd8ab0f 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -136,10 +136,10 @@ private:
mutable stdx::mutex _lastCommittedOpTimeMutex;
/**
- * Logical time representing the latest opTime timestamp known to be in this shard's majority
- * committed snapshot. Only the latest time is kept because lagged secondaries may return earlier
- * times.
- */
+ * Logical time representing the latest opTime timestamp known to be in this shard's majority
+ * committed snapshot. Only the latest time is kept because lagged secondaries may return
+ * earlier times.
+ */
LogicalTime _lastCommittedOpTime;
/**
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index c16190f3949..cbdad3a1257 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -99,9 +99,7 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
uassert(28785,
str::stream() << "Unrecognized configsvr mode number: " << configServerModeNumber
<< ". Range of known configsvr mode numbers is: ["
- << minKnownConfigServerMode
- << ", "
- << maxKnownConfigServerMode
+ << minKnownConfigServerMode << ", " << maxKnownConfigServerMode
<< "]",
configServerModeNumber >= minKnownConfigServerMode &&
configServerModeNumber <= maxKnownConfigServerMode);
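uassert takes a numbered error code (or an ErrorExtraInfo object such as the StaleConfigInfo used elsewhere in this patch), a lazily built message, and a predicate; it throws a DBException when the predicate is false. A minimal sketch of the range-check pattern above, with hypothetical values:

    int mode = 2, minKnown = 0, maxKnown = 1;
    uassert(28785,
            str::stream() << "Unrecognized configsvr mode number: " << mode
                          << ". Range of known configsvr mode numbers is: ["
                          << minKnown << ", " << maxKnown << "]",
            mode >= minKnown && mode <= maxKnown);  // false here, so this throws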
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index f1cdfbc5e6b..1ffed487065 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
+using std::shared_ptr;
using std::string;
namespace {
@@ -302,33 +302,24 @@ bool checkShardVersion(OperationContext* opCtx,
const ChunkVersion refVersion(refManager->getVersion(shard->getId()));
const ChunkVersion currentVersion(manager->getVersion(shard->getId()));
- string msg(str::stream() << "manager (" << currentVersion.toString() << " : "
- << manager->getSequenceNumber()
- << ") "
- << "not compatible with reference manager ("
- << refVersion.toString()
- << " : "
- << refManager->getSequenceNumber()
- << ") "
- << "on shard "
- << shard->getId()
- << " ("
- << shard->getConnString().toString()
- << ")");
+ string msg(str::stream()
+ << "manager (" << currentVersion.toString() << " : "
+ << manager->getSequenceNumber() << ") "
+ << "not compatible with reference manager (" << refVersion.toString()
+ << " : " << refManager->getSequenceNumber() << ") "
+ << "on shard " << shard->getId() << " (" << shard->getConnString().toString()
+ << ")");
uasserted(StaleConfigInfo(nss, refVersion, currentVersion), msg);
}
} else if (refManager) {
- string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream()
- << manager->getSequenceNumber())
+ string msg(str::stream() << "not sharded ("
+ << (!manager ? string("<none>")
+ : str::stream() << manager->getSequenceNumber())
<< ") but has reference manager ("
- << refManager->getSequenceNumber()
- << ") "
- << "on conn "
- << conn->getServerAddress()
- << " ("
- << conn_in->getServerAddress()
- << ")");
+ << refManager->getSequenceNumber() << ") "
+ << "on conn " << conn->getServerAddress() << " ("
+ << conn_in->getServerAddress() << ")");
uasserted(
StaleConfigInfo(nss, refManager->getVersion(shard->getId()), ChunkVersion::UNSHARDED()),
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 4c1dd33cf26..b58db5e41da 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -206,15 +206,13 @@ std::vector<AsyncRequestsSender::Response> gatherResponses(
if (ErrorCodes::isStaleShardVersionError(status.code())) {
uassertStatusOK(status.withContext(str::stream()
<< "got stale shardVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
if (ErrorCodes::StaleDbVersion == status) {
uassertStatusOK(status.withContext(
str::stream() << "got stale databaseVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
@@ -528,8 +526,8 @@ void createShardDatabase(OperationContext* opCtx, StringData dbName) {
if (createDbResponse.commandStatus != ErrorCodes::NamespaceExists) {
uassertStatusOKWithContext(createDbResponse.commandStatus,
- str::stream() << "Database " << dbName
- << " could not be created");
+ str::stream()
+ << "Database " << dbName << " could not be created");
}
dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
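uassertStatusOKWithContext is the Status-flavored variant of the same idiom: it throws if the status is not OK, after prefixing the context string to the reason. A sketch with a hypothetical status value:

    Status createDbStatus(ErrorCodes::HostUnreachable, "no config server");
    uassertStatusOKWithContext(createDbStatus,
                               str::stream() << "Database " << "testdb"
                                             << " could not be created");  // throws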
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 872adb0028a..67e486f9b3a 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -133,7 +133,11 @@ public:
}
const std::initializer_list<StringData> passthroughFields = {
- "$queryOptions", "collation", "hint", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
+ "$queryOptions",
+ "collation",
+ "hint",
+ "readConcern",
+ QueryRequest::cmdOptionMaxTimeMS,
};
for (auto name : passthroughFields) {
if (auto field = cmdObj[name]) {
diff --git a/src/mongo/s/commands/cluster_data_size_cmd.cpp b/src/mongo/s/commands/cluster_data_size_cmd.cpp
index 6666ccda065..c8d410e1634 100644
--- a/src/mongo/s/commands/cluster_data_size_cmd.cpp
+++ b/src/mongo/s/commands/cluster_data_size_cmd.cpp
@@ -86,9 +86,10 @@ public:
uassert(ErrorCodes::BadValue,
"keyPattern must be empty or must be an object that equals the shard key",
- !keyPattern || (keyPattern.type() == Object &&
- SimpleBSONObjComparator::kInstance.evaluate(
- cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
+ !keyPattern ||
+ (keyPattern.type() == Object &&
+ SimpleBSONObjComparator::kInstance.evaluate(
+ cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
uassert(ErrorCodes::BadValue,
str::stream() << "min value " << min << " does not have shard key",
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index fb3b748f4c5..3082a34fc7b 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -165,17 +165,16 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
for (size_t i = 0; i < shardResults.size(); i++) {
auto status = getStatusFromCommandResult(shardResults[i].result);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed");
+ return status.withContext(str::stream()
+ << "Explain command on shard "
+ << shardResults[i].target.toString() << " failed");
}
if (Object != shardResults[i].result["queryPlanner"].type()) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed, caused by: "
- << shardResults[i].result);
+ str::stream()
+ << "Explain command on shard " << shardResults[i].target.toString()
+ << " failed, caused by: " << shardResults[i].result);
}
if (shardResults[i].result.hasField("executionStats")) {
@@ -197,9 +196,9 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
// Either all shards should have all plans execution stats, or none should.
if (0 != numShardsAllPlansStats && shardResults.size() != numShardsAllPlansStats) {
return Status(ErrorCodes::InternalError,
- str::stream() << "Only " << numShardsAllPlansStats << "/"
- << shardResults.size()
- << " had allPlansExecution explain information.");
+ str::stream()
+ << "Only " << numShardsAllPlansStats << "/" << shardResults.size()
+ << " had allPlansExecution explain information.");
}
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 8c4ccdd9e94..09882eeb642 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -148,8 +148,7 @@ BSONObj makeExplainedObj(const BSONObj& outerObj, StringData dbName) {
if (auto innerDb = innerObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbName
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbName);
}
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index dd726a64cc6..e133875439b 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -49,8 +49,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
const char kTermField[] = "term";
diff --git a/src/mongo/s/commands/cluster_find_test.cpp b/src/mongo/s/commands/cluster_find_test.cpp
index 7ebb923448f..8d0dc6792d4 100644
--- a/src/mongo/s/commands/cluster_find_test.cpp
+++ b/src/mongo/s/commands/cluster_find_test.cpp
@@ -41,8 +41,7 @@ protected:
<< "coll");
const BSONObj kFindCmdTargeted = BSON("find"
<< "coll"
- << "filter"
- << BSON("_id" << 0));
+ << "filter" << BSON("_id" << 0));
// The index of the shard expected to receive the response is used to prevent different shards
// from returning documents with the same shard key. This is expected to be 0 for queries
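
The expectedCmdObj lines above lean on the BSON(...) macro, whose chained << alternates field names with values. Here is a toy builder showing just that alternation; it is not the real BSONObjBuilder and merely stringifies its operands:

#include <cassert>
#include <sstream>
#include <string>
#include <vector>

// Even-positioned operands become field names, odd-positioned operands become
// values, mirroring BSON("find" << "coll" << "filter" << ...).
class ToyObjBuilder {
public:
    template <typename T>
    ToyObjBuilder& operator<<(const T& operand) {
        std::ostringstream os;
        os << operand;
        if (_pendingName.empty()) {
            _pendingName = os.str();
        } else {
            _fields.push_back(_pendingName + ": " + os.str());
            _pendingName.clear();
        }
        return *this;
    }
    std::string str() const {
        std::string out = "{";
        for (size_t i = 0; i < _fields.size(); ++i)
            out += (i ? ", " : "") + _fields[i];
        return out + "}";
    }

private:
    std::string _pendingName;
    std::vector<std::string> _fields;
};

int main() {
    ToyObjBuilder b;
    b << "find" << "coll" << "filter" << 0;
    assert(b.str() == "{find: coll, filter: 0}");
    return 0;
}
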
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index c22cbe06ef9..89e00b7d497 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -86,9 +86,7 @@ private:
uassert(28625,
str::stream() << "The op argument to killOp must be of the format shardid:opid"
- << " but found \""
- << opToKill
- << '"',
+ << " but found \"" << opToKill << '"',
(opToKill.size() >= 3) && // must have at least N:N
(opSepPos != std::string::npos) && // must have ':' as separator
(opSepPos != 0) && // can't be :NN
diff --git a/src/mongo/s/commands/cluster_map_reduce.cpp b/src/mongo/s/commands/cluster_map_reduce.cpp
index 7322292ab2f..c413e14490b 100644
--- a/src/mongo/s/commands/cluster_map_reduce.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce.cpp
@@ -364,8 +364,7 @@ bool runMapReduce(OperationContext* opCtx,
opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns " << nss.ns()
- << " and query "
- << q);
+ << " and query " << q);
throw;
}
@@ -396,8 +395,8 @@ bool runMapReduce(OperationContext* opCtx,
if (!ok) {
// At this point we will return
- errmsg = str::stream() << "MR parallel processing failed: "
- << singleResult.toString();
+ errmsg = str::stream()
+ << "MR parallel processing failed: " << singleResult.toString();
continue;
}
@@ -515,11 +514,11 @@ bool runMapReduce(OperationContext* opCtx,
// the output collection exists and is unsharded, fail because we should not go
// from unsharded to sharded.
BSONObj listCollsCmdResponse;
- ok = conn->runCommand(
- outDB,
- BSON("listCollections" << 1 << "filter"
- << BSON("name" << outputCollNss.coll())),
- listCollsCmdResponse);
+ ok =
+ conn->runCommand(outDB,
+ BSON("listCollections"
+ << 1 << "filter" << BSON("name" << outputCollNss.coll())),
+ listCollsCmdResponse);
BSONObj cursorObj = listCollsCmdResponse.getObjectField("cursor");
BSONObj collections = cursorObj["firstBatch"].Obj();
@@ -592,9 +591,7 @@ bool runMapReduce(OperationContext* opCtx,
ok = true;
} catch (DBException& e) {
e.addContext(str::stream() << "could not run final reduce on all shards for "
- << nss.ns()
- << ", output "
- << outputCollNss.ns());
+ << nss.ns() << ", output " << outputCollNss.ns());
throw;
}
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index ef20c6dde10..c686efce22e 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -138,10 +138,10 @@ public:
if (!cm->getShardKeyPattern().isShardKey(minKey) ||
!cm->getShardKeyPattern().isShardKey(maxKey)) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << minKey << "," << maxKey << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << minKey << "," << maxKey << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 9b87c67733a..81400604b41 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -116,10 +116,9 @@ public:
const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
- std::string msg(str::stream() << "Could not move chunk in '" << nss.ns()
- << "' to shard '"
- << toString
- << "' because that shard does not exist");
+ std::string msg(str::stream()
+ << "Could not move chunk in '" << nss.ns() << "' to shard '" << toString
+ << "' because that shard does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
@@ -158,10 +157,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index a3eb246a2b1..47f42767a5a 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -205,10 +205,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -225,9 +225,9 @@ public:
} else {
// middle
if (!cm->getShardKeyPattern().isShardKey(middle)) {
- errmsg = str::stream() << "new split key " << middle
- << " is not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "new split key " << middle << " is not valid for shard key pattern "
+ << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -239,9 +239,9 @@ public:
chunk.emplace(cm->findIntersectingChunkWithSimpleCollation(middle));
if (chunk->getMin().woCompare(middle) == 0 || chunk->getMax().woCompare(middle) == 0) {
- errmsg = str::stream() << "new split key " << middle
- << " is a boundary key of existing chunk "
- << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
+ errmsg = str::stream()
+ << "new split key " << middle << " is a boundary key of existing chunk "
+ << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
return false;
}
}
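
The boundary check above (woCompare against getMin and getMax) rejects a split key that equals an existing chunk edge. Chunk ranges are half-open [min, max), so splitting at an edge would create an empty chunk. A small model of the check, with ints standing in for shard key values:

#include <cassert>

// Chunk ranges are half-open [min, max); a valid split key must fall strictly
// inside the range, never on a boundary.
struct Chunk {
    int min;
    int max;  // exclusive
};

bool isValidSplitKey(const Chunk& c, int middle) {
    if (middle == c.min || middle == c.max)
        return false;  // boundary key of the existing chunk
    return c.min < middle && middle < c.max;
}

int main() {
    Chunk chunk{0, 10};
    assert(!isValidSplitKey(chunk, 0));   // equals existing lower bound
    assert(!isValidSplitKey(chunk, 10));  // equals existing upper bound
    assert(isValidSplitKey(chunk, 5));    // strictly interior: allowed
    return 0;
}
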
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 0b2a89a985e..86847ce82d1 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -503,8 +503,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << ns.coll());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << ns.coll());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index ba45f6cca80..b917da533c1 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -190,9 +190,7 @@ void addContextForTransactionAbortingError(StringData txnIdAsString,
DBException& ex,
StringData reason) {
ex.addContext(str::stream() << "Transaction " << txnIdAsString << " was aborted on statement "
- << latestStmtId
- << " due to: "
- << reason);
+ << latestStmtId << " due to: " << reason);
}
void execCommandClient(OperationContext* opCtx,
@@ -646,9 +644,7 @@ DbResponse Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
str::stream() << "The 'exhaust' query option is invalid for mongos queries: "
- << nss.ns()
- << " "
- << q.query.toString());
+ << nss.ns() << " " << q.query.toString());
}
// Determine the default read preference mode based on the value of the slaveOk flag.
@@ -873,9 +869,7 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
const int numCursors = dbm->pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
- << ", message size: "
- << dbm->msg().dataSize()
- << ".",
+ << ", message size: " << dbm->msg().dataSize() << ".",
dbm->msg().dataSize() == 8 + (8 * numCursors));
uassert(28794,
str::stream() << "numCursors must be between 1 and 29999. numCursors: " << numCursors
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index bc9d9abfd23..97e2ccef518 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -130,8 +130,9 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
if (opCtx && opCtx->getClient()) {
clientAddr = opCtx->getClient()->clientAddress(true);
}
- log() << "Received " << what << " " << clientAddr << " indicating config server optime "
- "term has increased, previous optime "
+ log() << "Received " << what << " " << clientAddr
+ << " indicating config server optime "
+ "term has increased, previous optime "
<< prevOpTime << ", now " << opTime;
}
return prevOpTime;
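
One subtlety in the grid.cpp hunk above: the log message is written as two adjacent string literals ("... optime " followed by "term has increased ..."), which the compiler joins into a single literal. The reflow therefore changes only where the source line breaks, not the logged text:

#include <iostream>

int main() {
    // Adjacent string literals are concatenated at compile time, so this is
    // one argument to operator<<, not two.
    std::cout << "indicating config server optime "
                 "term has increased\n";
    return 0;
}
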
diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h
index b7adce6e829..97c3bc53e34 100644
--- a/src/mongo/s/mongos_options.h
+++ b/src/mongo/s/mongos_options.h
@@ -78,4 +78,4 @@ Status validateMongosOptions(const moe::Environment& params);
Status canonicalizeMongosOptions(moe::Environment* params);
Status storeMongosOptions(const moe::Environment& params);
-}
+} // namespace mongo
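
This hunk, like the later ones in cluster_client_cursor_params.h and cluster_cursor_manager.h, normalizes the closing brace of the top-level namespace to carry a comment naming exactly what it closes. The convention keeps the end of a long header self-describing:

namespace mongo {

struct Placeholder {};  // stands in for the header's real declarations

}  // namespace mongo
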
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 8ee68933d49..f96f9a635b6 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -644,15 +644,13 @@ bool AsyncResultsMerger::_addBatchToBuffer(WithLock lk,
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Missing field '" << AsyncResultsMerger::kSortKeyField
- << "' in document: "
- << obj);
+ << "' in document: " << obj);
return false;
} else if (!_params.getCompareWholeSortKey() && key.type() != BSONType::Object) {
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Field '" << AsyncResultsMerger::kSortKeyField
- << "' was not of type Object in document: "
- << obj);
+ << "' was not of type Object in document: " << obj);
return false;
}
}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 909bdf85d79..575f3cc8509 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/query/results_merger_test_fixture.h"
#include "mongo/unittest/death_test.h"
-#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -1324,8 +1323,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
// The next getMore request should include the maxTimeMS.
expectedCmdObj = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(getNthPendingRequest(0).cmdObj, expectedCmdObj);
// Clean up.
@@ -1346,11 +1344,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
// Create one cursor whose initial response has a postBatchResumeToken.
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1380,11 +1377,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
std::vector<RemoteCursor> cursors;
BSONObj pbrtFirstCursor;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1409,11 +1405,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNotReadyIfRemoteHasLowerPostB
std::vector<RemoteCursor> cursors;
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1461,11 +1456,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
auto firstDoc = batch1.front();
responses.emplace_back(kTestNss, CursorId(123), batch1, boost::none, pbrtFirstCursor);
@@ -1491,11 +1485,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 5), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
auto secondDoc = batch2.front();
responses.emplace_back(kTestNss, CursorId(456), batch2, boost::none, pbrtSecondCursor);
@@ -1541,11 +1534,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
responses.emplace_back(kTestNss, CursorId(123), batch1, boost::none, pbrtFirstCursor);
scheduleNetworkResponses(std::move(responses));
@@ -1570,11 +1562,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 3), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
// The last observed time should still be later than the first shard, so we can get the data
// from it.
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 2c269fd2d6b..5d07b0e2c75 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -169,7 +169,6 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
ASSERT_FALSE(next.isEOF());
ASSERT_BSONOBJ_EQ(*next.getResult(), BSON("x" << 1));
-
});
// Schedule the response to the getMore which will return the next result and mark the cursor as
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index 4d1d85e9503..cf7bf138494 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -284,8 +284,7 @@ Status appendExplainResults(sharded_agg_helpers::DispatchShardPipelineResults&&
auto queryPlannerElement = data["queryPlanner"];
uassert(51157,
str::stream() << "Malformed explain response received from shard " << shardId
- << ": "
- << data.toString(),
+ << ": " << data.toString(),
queryPlannerElement);
explain << "queryPlanner" << queryPlannerElement;
if (auto executionStatsElement = data["executionStats"]) {
@@ -739,8 +738,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
!request.getRuntimeConstants());
uassert(51089,
str::stream() << "Internal parameter(s) [" << AggregationRequest::kNeedsMergeName
- << ", "
- << AggregationRequest::kFromMongosName
+ << ", " << AggregationRequest::kFromMongosName
<< "] cannot be set to 'true' when sent to mongos",
!request.needsMerge() && !request.isFromMongos());
auto executionNsRoutingInfoStatus =
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index 989231ce951..bf8665894b8 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -318,8 +318,7 @@ BSONObj buildNewKeyPattern(const ShardKeyPattern& shardKey, StringMap<std::strin
auto it = renames.find(elem.fieldNameStringData());
invariant(it != renames.end(),
str::stream() << "Could not find new name of shard key field \""
- << elem.fieldName()
- << "\": rename map was "
+ << elem.fieldName() << "\": rename map was "
<< mapToString(renames));
newPattern.appendAs(elem, it->second);
}
diff --git a/src/mongo/s/query/cluster_client_cursor_params.h b/src/mongo/s/query/cluster_client_cursor_params.h
index 7106afcdfa1..9fff8d392df 100644
--- a/src/mongo/s/query/cluster_client_cursor_params.h
+++ b/src/mongo/s/query/cluster_client_cursor_params.h
@@ -157,4 +157,4 @@ struct ClusterClientCursorParams {
boost::optional<bool> isAutoCommit;
};
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index b25c26946cf..f5b3290a59a 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -57,9 +57,8 @@ Status cursorNotFoundStatus(const NamespaceString& nss, CursorId cursorId) {
Status cursorInUseStatus(const NamespaceString& nss, CursorId cursorId) {
return {ErrorCodes::CursorInUse,
- str::stream() << "Cursor already in use (namespace: '" << nss.ns() << "', id: "
- << cursorId
- << ")."};
+ str::stream() << "Cursor already in use (namespace: '" << nss.ns()
+ << "', id: " << cursorId << ")."};
}
//
@@ -349,9 +348,9 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
// Check if the user is coauthorized to access this cursor.
auto authCheckStatus = authChecker(entry->getAuthenticatedUsers());
if (!authCheckStatus.isOK()) {
- return authCheckStatus.withContext(
- str::stream() << "cursor id " << cursorId
- << " was not created by the authenticated user");
+ return authCheckStatus.withContext(str::stream()
+ << "cursor id " << cursorId
+ << " was not created by the authenticated user");
}
if (checkSessionAuth == kCheckSession) {
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 353727349b7..99ded693398 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -730,4 +730,4 @@ private:
size_t _cursorsTimedOut = 0;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index b1d1cab8aec..6592e16a41c 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -99,9 +99,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
ErrorCodes::Overflow,
str::stream()
<< "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
- << *qr.getLimit()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getLimit() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
}
@@ -118,9 +116,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
} else {
@@ -131,9 +127,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newNToReturn = newNToReturnValue;
}
@@ -412,8 +406,7 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
uasserted(ErrorCodes::BadValue,
str::stream() << "Projection contains illegal field '"
<< AsyncResultsMerger::kSortKeyField
- << "': "
- << query.getQueryRequest().getProj());
+ << "': " << query.getQueryRequest().getProj());
}
auto const catalogCache = Grid::get(opCtx)->catalogCache();
@@ -436,8 +429,8 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
if (retries >= kMaxRetries) {
// Check if there are no retries remaining, so the last received error can be
// propagated to the caller.
- ex.addContext(str::stream() << "Failed to run query after " << kMaxRetries
- << " retries");
+ ex.addContext(str::stream()
+ << "Failed to run query after " << kMaxRetries << " retries");
throw;
}
@@ -463,8 +456,8 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
if (retries >= kMaxRetries) {
// Check if there are no retries remaining, so the last received error can be
// propagated to the caller.
- ex.addContext(str::stream() << "Failed to run query after " << kMaxRetries
- << " retries");
+ ex.addContext(str::stream()
+ << "Failed to run query after " << kMaxRetries << " retries");
throw;
} else if (!ErrorCodes::isStaleShardVersionError(ex.code()) &&
ex.code() != ErrorCodes::ShardNotFound) {
@@ -514,8 +507,7 @@ void validateLSID(OperationContext* opCtx,
if (!opCtx->getLogicalSessionId() && cursor->getLsid()) {
uasserted(50800,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
+ << ", which was created in session " << *cursor->getLsid()
<< ", without an lsid");
}
@@ -523,10 +515,8 @@ void validateLSID(OperationContext* opCtx,
(*opCtx->getLogicalSessionId() != *cursor->getLsid())) {
uasserted(50801,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
- << ", in session "
- << *opCtx->getLogicalSessionId());
+ << ", which was created in session " << *cursor->getLsid()
+ << ", in session " << *opCtx->getLogicalSessionId());
}
}
@@ -547,8 +537,7 @@ void validateTxnNumber(OperationContext* opCtx,
if (!opCtx->getTxnNumber() && cursor->getTxnNumber()) {
uasserted(50803,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber");
}
@@ -556,10 +545,8 @@ void validateTxnNumber(OperationContext* opCtx,
(*opCtx->getTxnNumber() != *cursor->getTxnNumber())) {
uasserted(50804,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber());
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber());
}
}
diff --git a/src/mongo/s/query/router_stage_pipeline.cpp b/src/mongo/s/query/router_stage_pipeline.cpp
index b617af5ba01..bfdd71fd104 100644
--- a/src/mongo/s/query/router_stage_pipeline.cpp
+++ b/src/mongo/s/query/router_stage_pipeline.cpp
@@ -106,8 +106,7 @@ BSONObj RouterStagePipeline::_validateAndConvertToBSON(const Document& event) {
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
index a1a7404b486..e1e85a70ef6 100644
--- a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
+++ b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
@@ -50,8 +50,9 @@ OperationContext* opCtx = nullptr;
TEST(RouterStageRemoveMetadataFieldsTest, RemovesMetaDataFields) {
auto mockStage = std::make_unique<RouterStageMock>(opCtx);
mockStage->queueResult(BSON("a" << 4 << "$sortKey" << 1 << "b" << 3));
- mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c" << BSON("d"
- << "foo")));
+ mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c"
+ << BSON("d"
+ << "foo")));
mockStage->queueResult(BSON("a" << 3));
mockStage->queueResult(BSON("a" << 3 << "$randVal" << 4 << "$sortKey" << 2));
mockStage->queueResult(
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index 38b13b4ea7a..43157322b0b 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -72,7 +72,7 @@ class TaskExecutor;
* @ cursorManager the ClusterCursorManager on which to register the resulting ClusterClientCursor
* @ privileges the PrivilegeVector of privileges needed for the original command, to be used for
* auth checking by GetMore
-*/
+ */
StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const ShardId& shardId,
const HostAndPort& server,
diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp
index 87ae164f2a7..8b28a1921b5 100644
--- a/src/mongo/s/request_types/add_shard_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_request_test.cpp
@@ -66,9 +66,8 @@ TEST(AddShardRequest, ParseInternalFieldsInvalidConnectionString) {
TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -81,9 +80,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
@@ -99,9 +97,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
TEST(AddShardRequest, ParseInternalFieldsMissingName) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -114,9 +111,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -131,11 +127,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -149,11 +143,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -170,10 +162,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
// Test converting a valid AddShardRequest to the internal config version of the command.
TEST(AddShardRequest, ToCommandForConfig) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -186,8 +177,8 @@ TEST(AddShardRequest, ToCommandForConfig) {
}
TEST(AddShardRequest, ToCommandForConfigMissingName) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -200,8 +191,8 @@ TEST(AddShardRequest, ToCommandForConfigMissingName) {
}
TEST(AddShardRequest, ToCommandForConfigMissingMaxSize) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
index 277302c3c0c..7a9b2b8141e 100644
--- a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
@@ -92,8 +92,7 @@ TEST(AddShardToZoneRequest, WrongShardNameTypeErrors) {
TEST(AddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromMongosCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -141,25 +140,23 @@ TEST(CfgAddShardToZoneRequest, MissingShardNameErrors) {
}
TEST(CfgAddShardToZoneRequest, WrongShardNameTypeErrors) {
- auto request = AddShardToZoneRequest::parseFromConfigCommand(
- BSON("_configsvrAddShardToZone" << 1234 << "zone"
- << "z"));
+ auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index f3f0a14b320..df15b79669d 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -45,18 +45,13 @@ using unittest::assertGet;
TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch())));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch())));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
@@ -72,21 +67,14 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch()
- << "secondaryThrottle"
- << BSON("_secondaryThrottle" << true << "writeConcern"
- << BSON("w" << 2)))));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch() << "secondaryThrottle"
+ << BSON("_secondaryThrottle" << true << "writeConcern" << BSON("w" << 2)))));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
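
These request-parsing tests all follow one shape: feed a command object to a parseFrom*Command function, then either assertGet the parsed request or compare getStatus against an expected error code. A minimal sketch of that StatusWith pattern follows; the types and the parsePort helper are illustrative stand-ins, not MongoDB's Status machinery:

#include <cassert>
#include <cctype>
#include <optional>
#include <string>

enum class ErrorCode { OK, TypeMismatch, NoSuchKey };

template <typename T>
struct StatusWith {
    ErrorCode code = ErrorCode::OK;
    std::optional<T> value;
    ErrorCode getStatus() const {
        return code;
    }
};

// Hypothetical parser: succeeds only when every character is a digit.
StatusWith<int> parsePort(const std::string& s) {
    if (s.empty())
        return {ErrorCode::NoSuchKey, std::nullopt};
    for (char c : s)
        if (!std::isdigit(static_cast<unsigned char>(c)))
            return {ErrorCode::TypeMismatch, std::nullopt};
    return {ErrorCode::OK, std::stoi(s)};
}

// Test helper in the spirit of unittest::assertGet: unwrap or fail.
template <typename T>
T assertGet(const StatusWith<T>& sw) {
    assert(sw.getStatus() == ErrorCode::OK);
    return *sw.value;
}

int main() {
    assert(parsePort("12a45").getStatus() == ErrorCode::TypeMismatch);
    assert(assertGet(parsePort("12345")) == 12345);
    return 0;
}
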
diff --git a/src/mongo/s/request_types/merge_chunk_request_test.cpp b/src/mongo/s/request_types/merge_chunk_request_test.cpp
index 7a300c5e813..94a7bf68511 100644
--- a/src/mongo/s/request_types/merge_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_test.cpp
@@ -42,11 +42,8 @@ TEST(MergeChunkRequest, BasicValidConfigCommand) {
auto request = assertGet(MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
@@ -60,14 +57,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
BSONObj serializedRequest =
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"
- << "validAfter"
- << Timestamp{100});
+ << "validAfter" << Timestamp{100});
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -84,11 +77,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
}
TEST(MergeChunkRequest, MissingNameSpaceErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -96,20 +88,18 @@ TEST(MergeChunkRequest, MissingCollEpochErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
+ << "chunkBoundaries" << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
<< "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, MissingChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -117,21 +107,17 @@ TEST(MergeChunkRequest, MissingShardNameErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
<< BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, WrongNamespaceTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5)
- << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ BSON("_configsvrCommitChunkMerge"
+ << 1234 << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -139,37 +125,27 @@ TEST(MergeChunkRequest, WrongCollEpochTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << 1234 << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongChunkBoundariesTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongShardNameTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << 1234));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -177,24 +153,19 @@ TEST(MergeChunkRequest, InvalidNamespaceErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(MergeChunkRequest, EmptyChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << BSONArray()
+ << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
@@ -202,11 +173,8 @@ TEST(MergeChunkRequest, TooFewChunkBoundariesErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
index 49332950329..b295e3f0b3d 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
@@ -178,8 +178,9 @@ TEST(MigrationSecondaryThrottleOptions, ParseFailsDisabledInCommandBSONWriteConc
TEST(MigrationSecondaryThrottleOptions, ParseFailsNotSpecifiedInCommandBSONWriteConcernSpecified) {
auto status = MigrationSecondaryThrottleOptions::createFromCommand(
- BSON("someOtherField" << 1 << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("someOtherField" << 1 << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.getStatus().code());
}
diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
index d8b6c94c61e..67981bd7f67 100644
--- a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
+++ b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
@@ -85,17 +85,16 @@ TEST(RemoveShardFromZoneRequest, MissingShardNameErrors) {
}
TEST(RemoveShardFromZoneRequest, WrongShardNameTypeErrors) {
- auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(
- BSON("removeShardFromZone" << 1234 << "zone"
- << "z"));
+ auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(RemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -155,16 +154,14 @@ TEST(CfgRemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request =
RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("_configsvrRemoveShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgRemoveShardFromZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/set_shard_version_request_test.cpp b/src/mongo/s/request_types/set_shard_version_request_test.cpp
index fb1052cc48d..59003730f98 100644
--- a/src/mongo/s/request_types/set_shard_version_request_test.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request_test.cpp
@@ -47,15 +47,12 @@ const ConnectionString shardCS = ConnectionString::forReplicaSet(
"ShardRS", {HostAndPort{"shardHost1:12345"}, HostAndPort{"shardHost2:12345"}});
TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
- SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ SetShardVersionRequest request = assertGet(
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(!request.isAuthoritative());
@@ -66,16 +63,12 @@ TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -86,18 +79,12 @@ TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
TEST(SetShardVersionRequest, ParseInitNoConnectionVersioning) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "noConnectionVersioning" << true)));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -110,16 +97,13 @@ TEST(SetShardVersionRequest, ParseFull) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch())));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -137,18 +121,14 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "authoritative"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "authoritative" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -166,18 +146,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "noConnectionVersioning" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -194,16 +170,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
TEST(SetShardVersionRequest, ParseFullNoNS) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -211,16 +185,14 @@ TEST(SetShardVersionRequest, ParseFullNoNS) {
TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "dbOnly"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "dbOnly"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -239,20 +211,10 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< ""
- << "init"
- << true
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << true << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "maxTimeMS"
- << 30000));
+ << "shardHost" << shardCS.toString() << "maxTimeMS" << 30000));
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
@@ -273,21 +235,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -309,21 +261,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -351,21 +293,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullForceRefresh) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -387,24 +319,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh) {
@@ -431,24 +351,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh)
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
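The hunks above are purely mechanical: the newer clang-format packs BSON stream operands onto shared lines but still breaks after a string-literal value, so string-valued fields keep their own line while scalar fields condense. A minimal sketch of the resulting shape, with illustrative field values, assuming the BSON macro from mongo/bson/bsonobjbuilder.h:

    // Illustrative only: scalar values share a line, string literals end theirs.
    #include "mongo/bson/bsonobjbuilder.h"

    mongo::BSONObj exampleShape() {
        return BSON("setShardVersion"
                    << "db.coll"    // string literal value: line breaks here
                    << "shard"
                    << "TestShard"  // string literal value: line breaks here
                    << "authoritative" << true << "maxTimeMS" << 30000);
    }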
diff --git a/src/mongo/s/request_types/split_chunk_request_test.cpp b/src/mongo/s/request_types/split_chunk_request_test.cpp
index d73f6c96591..1727c3aa792 100644
--- a/src/mongo/s/request_types/split_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_test.cpp
@@ -41,19 +41,12 @@ namespace {
using unittest::assertGet;
TEST(SplitChunkRequest, BasicValidConfigCommand) {
- auto request =
- assertGet(SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000")));
+ auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
ASSERT(ChunkRange(BSON("a" << 1), BSON("a" << 10)) == request.getChunkRange());
@@ -65,14 +58,8 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
<< "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
@@ -84,18 +71,12 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
}
TEST(SplitChunkRequest, ConfigCommandtoBSON) {
- BSONObj serializedRequest = BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000");
+ BSONObj serializedRequest =
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000");
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -112,197 +93,129 @@ TEST(SplitChunkRequest, ConfigCommandtoBSON) {
}
TEST(SplitChunkRequest, MissingNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingCollEpochErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10) << "splitPoints"
+ << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingSplitPointErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingShardNameErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, WrongNamespaceTypeErrors) {
auto request = SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongCollEpochTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << 1234 << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongChunkToSplitTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << 1234
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << 1234 << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongSplitPointTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongShardNameTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << 1234));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, InvalidNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << ""
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(SplitChunkRequest, EmptyChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSONObj()
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSONObj() << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::BadValue, request.getStatus());
}
TEST(SplitChunkRequest, EmptySplitPointsErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSONArray() << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
-}
+} // namespace
} // namespace mongo
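These tests exercise both consumption styles for the StatusWith that parseFromConfigCommand returns: unittest::assertGet unwraps the value on the happy path, while the error tests inspect getStatus() directly. A minimal sketch of the same pattern outside the test harness (cmdObj is hypothetical):

    // Sketch: consuming StatusWith<SplitChunkRequest> without assertGet.
    auto swRequest = SplitChunkRequest::parseFromConfigCommand(cmdObj);
    if (!swRequest.isOK()) {
        // NoSuchKey for a missing field, TypeMismatch for a wrongly typed one,
        // InvalidNamespace/BadValue/InvalidOptions from request validation.
        return swRequest.getStatus();
    }
    auto request = std::move(swRequest.getValue());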
diff --git a/src/mongo/s/request_types/split_chunk_request_type.cpp b/src/mongo/s/request_types/split_chunk_request_type.cpp
index 8993efac965..6773e413197 100644
--- a/src/mongo/s/request_types/split_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_type.cpp
@@ -161,8 +161,8 @@ const string& SplitChunkRequest::getShardName() const {
Status SplitChunkRequest::_validate() {
if (!getNamespace().isValid()) {
return Status(ErrorCodes::InvalidNamespace,
- str::stream() << "invalid namespace '" << _nss.ns()
- << "' specified for request");
+ str::stream()
+ << "invalid namespace '" << _nss.ns() << "' specified for request");
}
if (getSplitPoints().empty()) {
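Only the wrap point of the str::stream() chain changes here; the message is untouched. str::stream accumulates text via operator<< and converts implicitly to std::string, which is why it can sit directly inside a Status. A minimal sketch, assuming the mongo/util/str.h header:

    // Sketch: building a Status reason string inline with str::stream.
    #include "mongo/util/str.h"

    mongo::Status invalidNs(const std::string& ns) {
        return mongo::Status(mongo::ErrorCodes::InvalidNamespace,
                             mongo::str::stream()
                                 << "invalid namespace '" << ns << "' specified for request");
    }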
diff --git a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
index 350489aa242..cfbce859483 100644
--- a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
+++ b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
@@ -107,10 +107,7 @@ StatusWith<UpdateZoneKeyRangeRequest> UpdateZoneKeyRangeRequest::_parseFromComma
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "\"" << kZoneName << "\" had the wrong type. Expected "
- << typeName(String)
- << " or "
- << typeName(jstNULL)
- << ", found "
+ << typeName(String) << " or " << typeName(jstNULL) << ", found "
<< typeName(zoneElem.type())};
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index d75120849af..1cfaaf92e71 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -347,12 +347,14 @@ Status initializeSharding(OperationContext* opCtx) {
auto targeterFactoryPtr = targeterFactory.get();
ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
@@ -426,7 +428,7 @@ public:
void onConfirmedSet(const State& state) final {
auto connStr = state.connStr;
- auto fun = [ serviceContext = _serviceContext, connStr ](auto args) {
+ auto fun = [serviceContext = _serviceContext, connStr](auto args) {
if (ErrorCodes::isCancelationError(args.status.code())) {
return;
}
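Two lambda formatting rules show up in this file: a long lambda's parameters now break one per line, and init-captures lose the inner bracket padding the old tool emitted ("[ serviceContext = _serviceContext, connStr ]" becomes "[serviceContext = _serviceContext, connStr]"). A sketch of the capture style, with hypothetical names:

    // Sketch: generalized (init-)captures, new style; no padding inside
    // the brackets, long parameter lists broken one per line.
    auto onConfirmed = [serviceContext = getGlobalServiceContext(),
                        connStr](const auto& args) {
        // body elided
    };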
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index ca229e13cd4..07362cc5fc1 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -89,8 +89,7 @@ std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPa
// Numeric and ascending (1.0), or "hashed" and single field
uassert(ErrorCodes::BadValue,
str::stream()
- << "Shard key "
- << keyPattern.toString()
+ << "Shard key " << keyPattern.toString()
<< " can contain either a single 'hashed' field"
<< " or multiple numerical fields set to a value of 1. Failed to parse field "
<< patternEl.fieldNameStringData(),
@@ -163,10 +162,7 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
return {ErrorCodes::ShardKeyTooBig,
str::stream() << "shard keys must be less than " << kMaxShardKeySizeBytes
- << " bytes, but key "
- << shardKey
- << " is "
- << shardKey.objsize()
+ << " bytes, but key " << shardKey << " is " << shardKey.objsize()
<< " bytes"};
}
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index 808ccfbe419..4ec7e2df4c3 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -140,8 +140,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
BSON("a" << regex));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$dollarPrefixKey:true}}")),
fromjson("{a:{$dollarPrefixKey:true}}"));
@@ -169,8 +168,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
ASSERT_BSONOBJ_EQ(docKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$dollarPrefixKey:true}}")),
fromjson("{a:10, b:{$dollarPrefixKey:true}}"));
@@ -199,8 +197,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyNested) {
fromjson("{'a.b':10, c:30}"));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
BSON("a.b" << ref << "c" << 30));
@@ -308,8 +305,7 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
ASSERT_BSONOBJ_EQ(queryKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 59faf424f90..1f430a0d7be 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -156,18 +156,16 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
// is already performed at chunk split commit time, but we are performing it here for parity
// with old auto-split code, which might rely on it.
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMin() == splitPoints.front())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.front()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.front() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMax() == splitPoints.back())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.back()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.back() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
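The boundary check itself is unchanged: BSONObj equality in this codebase is routed through a comparator rather than a bare operator==, and only the str::stream message re-wraps. A minimal sketch of the comparator idiom:

    // Sketch: BSONObj comparisons go through SimpleBSONObjComparator.
    #include "mongo/bson/simple_bsonobj_comparator.h"

    bool sameBound(const mongo::BSONObj& a, const mongo::BSONObj& b) {
        return mongo::SimpleBSONObjComparator::kInstance.evaluate(a == b);
    }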
diff --git a/src/mongo/s/sharding_egress_metadata_hook.cpp b/src/mongo/s/sharding_egress_metadata_hook.cpp
index 468fe77bae1..10e837a2430 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.cpp
+++ b/src/mongo/s/sharding_egress_metadata_hook.cpp
@@ -120,8 +120,8 @@ Status ShardingEgressMetadataHook::_advanceConfigOpTimeFromShard(OperationContex
if (opTime.is_initialized()) {
grid->advanceConfigOpTime(opCtx,
opTime.get(),
- str::stream() << "reply from shard " << shardId
- << " node");
+ str::stream()
+ << "reply from shard " << shardId << " node");
}
}
return Status::OK();
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index 0a7575b404f..246633ce8a8 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -86,7 +86,7 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
/**
* Loads cluster ID and waits for the reload of the Shard Registry.
-*/
+ */
Status waitForShardRegistryReload(OperationContext* opCtx);
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 29f029933dd..c31c79affe3 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -115,9 +115,8 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- ASSERT_OK(replSetConfig.initialize(
- BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
- << serversBob.arr())));
+ ASSERT_OK(replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version"
+ << 3 << "members" << serversBob.arr())));
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
@@ -194,12 +193,14 @@ std::unique_ptr<ShardRegistry> ShardingMongodTestFixture::makeShardRegistry(
_targeterFactory = targeterFactoryPtr;
ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index c0b5e8d4a62..075a472c25b 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -152,12 +152,14 @@ ShardingTestFixture::ShardingTestFixture() {
_targeterFactory->addTargeterToReturn(configCS, std::move(configTargeter));
ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
@@ -334,10 +336,8 @@ void ShardingTestFixture::expectConfigCollectionCreate(const HostAndPort& config
BSON("create" << collName << "capped" << true << "size" << cappedSize << "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 60000)
- << "maxTimeMS"
- << 30000);
+ << "wtimeout" << 60000)
+ << "maxTimeMS" << 30000);
ASSERT_BSONOBJ_EQ(expectedCreateCmd, request.cmdObj);
return response;
diff --git a/src/mongo/s/sharding_task_executor.cpp b/src/mongo/s/sharding_task_executor.cpp
index 8a3e3c39b60..c8db2851af7 100644
--- a/src/mongo/s/sharding_task_executor.cpp
+++ b/src/mongo/s/sharding_task_executor.cpp
@@ -160,9 +160,12 @@ StatusWith<TaskExecutor::CallbackHandle> ShardingTaskExecutor::scheduleRemoteCom
auto clusterGLE = ClusterLastErrorInfo::get(request.opCtx->getClient());
- auto shardingCb =
- [ timeTracker, clusterGLE, cb, grid = Grid::get(request.opCtx), hosts = request.target ](
- const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
+ auto shardingCb = [timeTracker,
+ clusterGLE,
+ cb,
+ grid = Grid::get(request.opCtx),
+ hosts = request.target](
+ const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
ON_BLOCK_EXIT([&cb, &args]() { cb(args); });
if (!args.response.isOK()) {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index ffcdd6cd82b..871293699ea 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -53,7 +53,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
Status ShardingTaskExecutorPoolController::validateHostTimeout(const int& hostTimeoutMS) {
auto toRefreshTimeoutMS = gParameters.toRefreshTimeoutMS.load();
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index c2bdc3d7f68..8bfb6cbbb39 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_session_id.h"
-#include "mongo/db/logical_session_id.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -520,17 +519,13 @@ void TransactionRouter::Router::_assertAbortStatusIsOkOrNoSuchTransaction(
auto shardResponse = uassertStatusOKWithContext(
std::move(response.swResponse),
str::stream() << "Failed to send abort to shard " << response.shardId
- << " between retries of statement "
- << p().latestStmtId);
+ << " between retries of statement " << p().latestStmtId);
auto status = getStatusFromCommandResult(shardResponse.data);
uassert(ErrorCodes::NoSuchTransaction,
str::stream() << txnIdToString() << "Transaction aborted between retries of statement "
- << p().latestStmtId
- << " due to error: "
- << status
- << " from shard: "
- << response.shardId,
+ << p().latestStmtId << " due to error: " << status
+ << " from shard: " << response.shardId,
status.isOK() || status.code() == ErrorCodes::NoSuchTransaction);
// abortTransaction is sent with no write concern, so there's no need to check for a write
@@ -658,8 +653,9 @@ void TransactionRouter::Router::onSnapshotError(OperationContext* opCtx,
const Status& errorStatus) {
invariant(canContinueOnSnapshotError());
- LOG(3) << txnIdToString() << " Clearing pending participants and resetting global snapshot "
- "timestamp after snapshot error: "
+ LOG(3) << txnIdToString()
+ << " Clearing pending participants and resetting global snapshot "
+ "timestamp after snapshot error: "
<< errorStatus << ", previous timestamp: " << o().atClusterTime->getTime();
// The transaction must be restarted on all participants because a new read timestamp will be
@@ -711,17 +707,14 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
// This transaction is older than the transaction currently in progress, so throw an error.
uasserted(ErrorCodes::TransactionTooOld,
str::stream() << "txnNumber " << txnNumber << " is less than last txnNumber "
- << o().txnNumber
- << " seen in session "
- << _sessionId());
+ << o().txnNumber << " seen in session " << _sessionId());
} else if (txnNumber == o().txnNumber) {
// This is the same transaction as the one in progress.
switch (action) {
case TransactionActions::kStart: {
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "txnNumber " << o().txnNumber << " for session "
- << _sessionId()
- << " already started");
+ << _sessionId() << " already started");
}
case TransactionActions::kContinue: {
uassert(ErrorCodes::InvalidOptions,
@@ -767,11 +760,9 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
}
case TransactionActions::kContinue: {
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << "cannot continue txnId " << o().txnNumber
- << " for session "
- << _sessionId()
- << " with txnId "
- << txnNumber);
+ str::stream()
+ << "cannot continue txnId " << o().txnNumber << " for session "
+ << _sessionId() << " with txnId " << txnNumber);
}
case TransactionActions::kCommit: {
_resetRouterState(opCtx, txnNumber);
@@ -896,11 +887,10 @@ BSONObj TransactionRouter::Router::_commitTransaction(
switch (participant.second.readOnly) {
case Participant::ReadOnly::kUnset:
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << txnIdToString() << " Failed to commit transaction "
- << "because a previous statement on the transaction "
- << "participant "
- << participant.first
- << " was unsuccessful.");
+ str::stream()
+ << txnIdToString() << " Failed to commit transaction "
+ << "because a previous statement on the transaction "
+ << "participant " << participant.first << " was unsuccessful.");
case Participant::ReadOnly::kReadOnly:
readOnlyShards.push_back(participant.first);
break;
@@ -1019,8 +1009,9 @@ void TransactionRouter::Router::implicitlyAbortTransaction(OperationContext* opC
const Status& errorStatus) {
if (o().commitType == CommitType::kTwoPhaseCommit ||
o().commitType == CommitType::kRecoverWithToken) {
- LOG(3) << txnIdToString() << " Router not sending implicit abortTransaction because commit "
- "may have been handed off to the coordinator";
+ LOG(3) << txnIdToString()
+ << " Router not sending implicit abortTransaction because commit "
+ "may have been handed off to the coordinator";
return;
}
diff --git a/src/mongo/s/transaction_router.h b/src/mongo/s/transaction_router.h
index 4d442f3a225..82e7498523a 100644
--- a/src/mongo/s/transaction_router.h
+++ b/src/mongo/s/transaction_router.h
@@ -215,78 +215,78 @@ public:
}
/**
- * Starts a fresh transaction in this session or continue an existing one. Also cleans up the
- * previous transaction state.
- */
+     * Starts a fresh transaction in this session or continues an existing one. Also cleans up
+ * the previous transaction state.
+ */
void beginOrContinueTxn(OperationContext* opCtx,
TxnNumber txnNumber,
TransactionActions action);
/**
- * Attaches the required transaction related fields for a request to be sent to the given
- * shard.
- *
- * Calling this method has the following side effects:
- * 1. Potentially selecting a coordinator.
- * 2. Adding the shard to the list of participants.
- * 3. Also append fields for first statements (ex. startTransaction, readConcern)
- * if the shard was newly added to the list of participants.
- */
+ * Attaches the required transaction related fields for a request to be sent to the given
+ * shard.
+ *
+ * Calling this method has the following side effects:
+ * 1. Potentially selecting a coordinator.
+ * 2. Adding the shard to the list of participants.
+     *  3. Appending fields for first statements (e.g. startTransaction, readConcern)
+ * if the shard was newly added to the list of participants.
+ */
BSONObj attachTxnFieldsIfNeeded(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& cmdObj);
/**
- * Processes the transaction metadata in the response from the participant if the response
- * indicates the operation succeeded.
- */
+ * Processes the transaction metadata in the response from the participant if the response
+ * indicates the operation succeeded.
+ */
void processParticipantResponse(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& responseObj);
/**
- * Returns true if the current transaction can retry on a stale version error from a
- * contacted shard. This is always true except for an error received by a write that is not
- * the first overall statement in the sharded transaction. This is because the entire
- * command will be retried, and shards that were not stale and are targeted again may
- * incorrectly execute the command a second time.
- *
- * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
- * the shards that returned a stale version error was involved in a previously completed a
- * statement for this transaction.
- *
- * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
- * allow retrying writes beyond the first overall statement.
- */
+ * Returns true if the current transaction can retry on a stale version error from a
+ * contacted shard. This is always true except for an error received by a write that is not
+ * the first overall statement in the sharded transaction. This is because the entire
+ * command will be retried, and shards that were not stale and are targeted again may
+ * incorrectly execute the command a second time.
+ *
+ * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
+     * the shards that returned a stale version error was involved in a previously completed
+ * statement for this transaction.
+ *
+ * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
+ * allow retrying writes beyond the first overall statement.
+ */
bool canContinueOnStaleShardOrDbError(StringData cmdName) const;
/**
- * Updates the transaction state to allow for a retry of the current command on a stale
- * version error. This includes sending abortTransaction to all cleared participants. Will
- * throw if the transaction cannot be continued.
- */
+ * Updates the transaction state to allow for a retry of the current command on a stale
+ * version error. This includes sending abortTransaction to all cleared participants. Will
+ * throw if the transaction cannot be continued.
+ */
void onStaleShardOrDbError(OperationContext* opCtx,
StringData cmdName,
const Status& errorStatus);
/**
- * Returns true if the current transaction can retry on a snapshot error. This is only true
- * on the first command recevied for a transaction.
- */
+ * Returns true if the current transaction can retry on a snapshot error. This is only true
+     * on the first command received for a transaction.
+ */
bool canContinueOnSnapshotError() const;
/**
- * Resets the transaction state to allow for a retry attempt. This includes clearing all
- * participants, clearing the coordinator, resetting the global read timestamp, and sending
- * abortTransaction to all cleared participants. Will throw if the transaction cannot be
- * continued.
- */
+ * Resets the transaction state to allow for a retry attempt. This includes clearing all
+ * participants, clearing the coordinator, resetting the global read timestamp, and sending
+ * abortTransaction to all cleared participants. Will throw if the transaction cannot be
+ * continued.
+ */
void onSnapshotError(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates the transaction tracking state to allow for a retry attempt on a view resolution
- * error. This includes sending abortTransaction to all cleared participants.
- */
+ * Updates the transaction tracking state to allow for a retry attempt on a view resolution
+ * error. This includes sending abortTransaction to all cleared participants.
+ */
void onViewResolutionError(OperationContext* opCtx, const NamespaceString& nss);
/**
@@ -301,206 +301,207 @@ public:
LogicalTime getSelectedAtClusterTime() const;
/**
- * Sets the atClusterTime for the current transaction to the latest time in the router's
- * logical clock. Does nothing if the transaction does not have snapshot read concern or an
- * atClusterTime has already been selected and cannot be changed.
- */
+ * Sets the atClusterTime for the current transaction to the latest time in the router's
+ * logical clock. Does nothing if the transaction does not have snapshot read concern or an
+ * atClusterTime has already been selected and cannot be changed.
+ */
void setDefaultAtClusterTime(OperationContext* opCtx);
/**
- * If a coordinator has been selected for the current transaction, returns its id.
- */
+ * If a coordinator has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getCoordinatorId() const;
/**
- * If a recovery shard has been selected for the current transaction, returns its id.
- */
+ * If a recovery shard has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getRecoveryShardId() const;
/**
- * Commits the transaction.
- *
- * For transactions that only did reads or only wrote to one shard, sends commit directly to
- * the participants and returns the first error response or the last (success) response.
- *
- * For transactions that performed writes to multiple shards, hands off the participant list
- * to the coordinator to do two-phase commit, and returns the coordinator's response.
- */
+ * Commits the transaction.
+ *
+ * For transactions that only did reads or only wrote to one shard, sends commit directly to
+ * the participants and returns the first error response or the last (success) response.
+ *
+ * For transactions that performed writes to multiple shards, hands off the participant list
+ * to the coordinator to do two-phase commit, and returns the coordinator's response.
+ */
BSONObj commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Sends abort to all participants.
- *
- * Returns the first error response or the last (success) response.
- */
+ * Sends abort to all participants.
+ *
+ * Returns the first error response or the last (success) response.
+ */
BSONObj abortTransaction(OperationContext* opCtx);
/**
- * Sends abort to all shards in the current participant list. Will retry on retryable errors,
- * but ignores the responses from each shard.
- */
+ * Sends abort to all shards in the current participant list. Will retry on retryable
+ * errors, but ignores the responses from each shard.
+ */
void implicitlyAbortTransaction(OperationContext* opCtx, const Status& errorStatus);
/**
- * If a coordinator has been selected for this transaction already, constructs a recovery
- * token, which can be used to resume commit or abort of the transaction from a different
- * router.
- */
+ * If a coordinator has been selected for this transaction already, constructs a recovery
+ * token, which can be used to resume commit or abort of the transaction from a different
+ * router.
+ */
void appendRecoveryToken(BSONObjBuilder* builder) const;
/**
- * Returns a string with the active transaction's transaction number and logical session id
- * (i.e. the transaction id).
- */
+ * Returns a string with the active transaction's transaction number and logical session id
+ * (i.e. the transaction id).
+ */
std::string txnIdToString() const;
/**
- * Returns the participant for this transaction or nullptr if the specified shard is not
- * participant of this transaction.
- */
+ * Returns the participant for this transaction or nullptr if the specified shard is not
+     * a participant in this transaction.
+ */
const Participant* getParticipant(const ShardId& shard);
/**
- * Returns the statement id of the latest received command for this transaction.
- */
+ * Returns the statement id of the latest received command for this transaction.
+ */
StmtId getLatestStmtId() const {
return p().latestStmtId;
}
/**
- * Returns a copy of the timing stats of the transaction router's active transaction.
- */
+ * Returns a copy of the timing stats of the transaction router's active transaction.
+ */
const TimingStats& getTimingStats() const {
return o().timingStats;
}
private:
/**
- * Resets the router's state. Used when the router sees a new transaction for the first time.
- * This is required because we don't create a new router object for each transaction, but
- * instead reuse the same object across different transactions.
- */
+ * Resets the router's state. Used when the router sees a new transaction for the first
+ * time. This is required because we don't create a new router object for each transaction,
+ * but instead reuse the same object across different transactions.
+ */
void _resetRouterState(OperationContext* opCtx, const TxnNumber& txnNumber);
/**
- * Internal method for committing a transaction. Should only throw on failure to send commit.
- */
+ * Internal method for committing a transaction. Should only throw on failure to send
+ * commit.
+ */
BSONObj _commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Retrieves the transaction's outcome from the shard specified in the recovery token.
- */
+ * Retrieves the transaction's outcome from the shard specified in the recovery token.
+ */
BSONObj _commitWithRecoveryToken(OperationContext* opCtx,
const TxnRecoveryToken& recoveryToken);
/**
- * Hands off coordinating a two-phase commit across all participants to the coordinator
- * shard.
- */
+ * Hands off coordinating a two-phase commit across all participants to the coordinator
+ * shard.
+ */
BSONObj _handOffCommitToCoordinator(OperationContext* opCtx);
/**
- * Sets the given logical time as the atClusterTime for the transaction to be the greater of
- * the given time and the user's afterClusterTime, if one was provided.
- */
+     * Sets the transaction's atClusterTime to the greater of the given logical time and the
+     * user's afterClusterTime, if one was provided.
+ */
void _setAtClusterTime(OperationContext* opCtx,
const boost::optional<LogicalTime>& afterClusterTime,
LogicalTime candidateTime);
/**
- * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
- * than NoSuchTransaction. Does not check for write concern errors.
- */
+ * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
+ * than NoSuchTransaction. Does not check for write concern errors.
+ */
void _assertAbortStatusIsOkOrNoSuchTransaction(
const AsyncRequestsSender::Response& response) const;
/**
- * If the transaction's read concern level is snapshot, asserts the participant's
- * atClusterTime matches the transaction's.
- */
+ * If the transaction's read concern level is snapshot, asserts the participant's
+ * atClusterTime matches the transaction's.
+ */
void _verifyParticipantAtClusterTime(const Participant& participant);
/**
- * Removes all participants created during the current statement from the participant list
- * and sends abortTransaction to each. Waits for all responses before returning.
- */
+ * Removes all participants created during the current statement from the participant list
+ * and sends abortTransaction to each. Waits for all responses before returning.
+ */
void _clearPendingParticipants(OperationContext* opCtx);
/**
- * Creates a new participant for the shard.
- */
+ * Creates a new participant for the shard.
+ */
TransactionRouter::Participant& _createParticipant(OperationContext* opCtx,
const ShardId& shard);
/**
- * Sets the new readOnly value for the current participant on the shard.
- */
+ * Sets the new readOnly value for the current participant on the shard.
+ */
void _setReadOnlyForParticipant(OperationContext* opCtx,
const ShardId& shard,
const Participant::ReadOnly readOnly);
/**
- * Updates relevant metrics when a new transaction is begun.
- */
+ * Updates relevant metrics when a new transaction is begun.
+ */
void _onNewTransaction(OperationContext* opCtx);
/**
- * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
- * seen so far.
- */
+ * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
+ * seen so far.
+ */
void _onBeginRecoveringDecision(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router receives an explicit abort from the client.
- */
+ * Updates relevant metrics when the router receives an explicit abort from the client.
+ */
void _onExplicitAbort(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router begins an implicit abort after an error.
- */
+ * Updates relevant metrics when the router begins an implicit abort after an error.
+ */
void _onImplicitAbort(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates relevant metrics when a transaction is about to begin commit.
- */
+ * Updates relevant metrics when a transaction is about to begin commit.
+ */
void _onStartCommit(WithLock wl, OperationContext* opCtx);
/**
- * Updates relevant metrics when a transaction receives a successful response for commit.
- */
+ * Updates relevant metrics when a transaction receives a successful response for commit.
+ */
void _onSuccessfulCommit(OperationContext* opCtx);
/**
- * Updates relevant metrics when commit receives a response with a non-retryable command
- * error per the retryable writes specification.
- */
+ * Updates relevant metrics when commit receives a response with a non-retryable command
+ * error per the retryable writes specification.
+ */
void _onNonRetryableCommitError(OperationContext* opCtx, Status commitStatus);
/**
- * The first time this method is called it marks the transaction as over in the router's
- * diagnostics and will log transaction information if its duration is over the global slowMS
- * threshold or the transaction log componenet verbosity >= 1. Only meant to be called when
- * the router definitively knows the transaction's outcome, e.g. it should not be invoked
- * after a network error on commit.
- */
+     * The first time this method is called, it marks the transaction as over in the router's
+ * diagnostics and will log transaction information if its duration is over the global
+     * slowMS threshold or the transaction log component verbosity >= 1. Only meant to be
+ * called when the router definitively knows the transaction's outcome, e.g. it should not
+ * be invoked after a network error on commit.
+ */
void _endTransactionTrackingIfNecessary(OperationContext* opCtx,
TerminationCause terminationCause);
/**
- * Returns all participants created during the current statement.
- */
+ * Returns all participants created during the current statement.
+ */
std::vector<ShardId> _getPendingParticipants() const;
/**
- * Prints slow transaction information to the log.
- */
+ * Prints slow transaction information to the log.
+ */
void _logSlowTransaction(OperationContext* opCtx, TerminationCause terminationCause) const;
/**
- * Returns a string to be logged for slow transactions.
- */
+ * Returns a string to be logged for slow transactions.
+ */
std::string _transactionInfoForLog(OperationContext* opCtx,
TerminationCause terminationCause) const;
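Read together, the reflowed comments above describe a call sequence through the router. A usage sketch assembled from those declarations only (surrounding setup and control flow hypothetical):

    // Sketch: one statement's path through TransactionRouter::Router.
    txnRouter.beginOrContinueTxn(opCtx, txnNumber, TransactionActions::kStart);
    txnRouter.setDefaultAtClusterTime(opCtx);
    auto cmdToSend = txnRouter.attachTxnFieldsIfNeeded(opCtx, shardId, cmdObj);
    // ... dispatch cmdToSend to the shard, then feed back its reply:
    txnRouter.processParticipantResponse(opCtx, shardId, responseObj);
    // On success, commit directly or via two-phase commit, as documented:
    auto commitResponse = txnRouter.commitTransaction(opCtx, recoveryToken);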
diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp
index 519b78091b8..36d4e3cb996 100644
--- a/src/mongo/s/transaction_router_test.cpp
+++ b/src/mongo/s/transaction_router_test.cpp
@@ -232,16 +232,9 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -258,11 +251,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -281,16 +270,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -307,11 +289,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -341,16 +319,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -367,11 +338,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -381,13 +348,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "autocommit" << false << "txnNumber"
<< txnNum);
{
@@ -405,10 +367,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -431,16 +390,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
@@ -454,16 +406,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum2);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum2);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -707,26 +652,18 @@ TEST_F(TransactionRouterTestWithDefaultSession, DoesNotAttachTxnNumIfAlreadyTher
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum
- << "readConcern"
+ << "txnNumber" << txnNum << "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum));
+ << "txnNumber" << txnNum));
ASSERT_BSONOBJ_EQ(expectedNewObj, newCmd);
}
@@ -744,8 +681,7 @@ DEATH_TEST_F(TransactionRouterTestWithDefaultSession,
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << TxnNumber(10)));
+ << "txnNumber" << TxnNumber(10)));
}
TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfAlreadyOnCmd) {
@@ -769,16 +705,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfA
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -810,14 +739,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughNoReadConcernToPart
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -843,14 +766,8 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"
<< "readConcern"
<< BSON("afterClusterTime" << kAfterClusterTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -1489,8 +1406,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1516,8 +1432,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTime.asTimestamp());
+ << "atClusterTime" << laterTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1539,8 +1454,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1560,8 +1474,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTimeSameStmt.asTimestamp());
+ << "atClusterTime" << laterTimeSameStmt.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1835,8 +1748,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -2348,8 +2260,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -3194,12 +3105,10 @@ TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsTransactionParameters) {
BSONObjBuilder lsidBob;
getSessionId().serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << kTxnNumber
- << ", autocommit: false"));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << kTxnNumber << ", autocommit: false"));
}
TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsDurationAtEnd) {
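
The hunks above are representative of most of this commit: clang-format 7 packs chained `<<` operands onto shared lines up to the column limit instead of breaking after every operator. A minimal standalone sketch of the same reflow, using std::ostringstream rather than MongoDB's BSON() macro; the function name txnFields is hypothetical, for illustration only:

// Illustration of the operator<< reflow applied throughout this commit.
#include <sstream>
#include <string>

std::string txnFields(long long txnNum) {
    std::ostringstream os;
    // The old style broke after every "<<"; the new style packs operands
    // onto shared lines up to the 100-column limit.
    os << "startTransaction" << true << "coordinator" << true << "autocommit" << false
       << "txnNumber" << txnNum;
    return os.str();
}
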
diff --git a/src/mongo/s/write_ops/batch_downconvert.cpp b/src/mongo/s/write_ops/batch_downconvert.cpp
index f313a01b8dd..323af2928c1 100644
--- a/src/mongo/s/write_ops/batch_downconvert.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert.cpp
@@ -78,14 +78,11 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
}
errors->wcError->setStatus({ErrorCodes::WriteConcernFailed, msg});
errors->wcError->setErrInfo(BSON("wtimeout" << true));
- } else if (code == 10990 /* no longer primary */
- ||
- code == 16805 /* replicatedToNum no longer primary */
- ||
- code == 14830 /* gle wmode changed / invalid */
+ } else if (code == 10990 /* no longer primary */
+ || code == 16805 /* replicatedToNum no longer primary */
+ || code == 14830 /* gle wmode changed / invalid */
// 2.6 Error codes
- ||
- code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+ || code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
code == ErrorCodes::WriteConcernFailed || code == ErrorCodes::PrimarySteppedDown) {
// Write concern errors that get returned as regular errors (result may not be ok: 1.0)
errors->wcError.reset(new WriteConcernErrorDetail());
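
The same rule moves binary operators to the head of the continuation line. A standalone sketch reusing the error codes from the hunk above (the function name isStepdownLikeCode is invented for illustration):

// Leading-operator style: "||" begins each continuation line rather than
// dangling alone on its own line as in the old formatting.
bool isStepdownLikeCode(int code) {
    return code == 10990    // no longer primary
        || code == 16805    // replicatedToNum no longer primary
        || code == 14830;   // gle wmode changed / invalid
}
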
diff --git a/src/mongo/s/write_ops/batch_downconvert_test.cpp b/src/mongo/s/write_ops/batch_downconvert_test.cpp
index ca9a3cd34d9..a45e7ac1aaa 100644
--- a/src/mongo/s/write_ops/batch_downconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert_test.cpp
@@ -40,8 +40,8 @@
namespace {
using namespace mongo;
-using std::vector;
using std::deque;
+using std::vector;
//
// Tests for parsing GLE responses into write errors and write concern errors for write
@@ -205,14 +205,9 @@ TEST(LegacyGLESuppress, StripCode) {
TEST(LegacyGLESuppress, TimeoutDupError24) {
const BSONObj gleResponse = BSON("ok" << 0.0 << "err"
<< "message"
- << "code"
- << 12345
- << "err"
+ << "code" << 12345 << "err"
<< "timeout"
- << "code"
- << 56789
- << "wtimeout"
- << true);
+ << "code" << 56789 << "wtimeout" << true);
BSONObj stripped = stripNonWCInfo(gleResponse);
ASSERT_EQUALS(stripped.nFields(), 4);
@@ -221,4 +216,4 @@ TEST(LegacyGLESuppress, TimeoutDupError24) {
ASSERT_EQUALS(stripped["code"].numberInt(), 56789);
ASSERT(stripped["wtimeout"].trueValue());
}
-}
+} // namespace
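
Two mechanical cleanups recur from here on: using-declarations are alphabetized, and namespace-closing braces gain a comment naming the namespace they close. These correspond to clang-format's SortUsingDeclarations and FixNamespaceComments options. A self-contained sketch of both, under a hypothetical namespace:

#include <cstddef>
#include <deque>
#include <vector>

namespace example {
using std::deque;   // kept in alphabetical order,
using std::vector;  // which is why several hunks merely swap adjacent lines

// Trivial use so the using-declarations are not dead weight.
inline std::size_t total(const deque<int>& d, const vector<int>& v) {
    return d.size() + v.size();
}
}  // namespace example
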
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 4412cd325ef..b06b0c1c63b 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -428,14 +428,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
batchOp.abortBatch(errorFromStatus(
{ErrorCodes::NoProgressMade,
str::stream() << "no progress was made executing batch write op in "
- << clientRequest.getNS().ns()
- << " after "
- << kMaxRoundsWithoutProgress
- << " rounds ("
- << numCompletedOps
- << " ops completed in "
- << rounds
- << " rounds total)"}));
+ << clientRequest.getNS().ns() << " after "
+ << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps
+ << " ops completed in " << rounds << " rounds total)"}));
break;
}
}
@@ -469,4 +464,4 @@ const HostOpTimeMap& BatchWriteExecStats::getWriteOpTimes() const {
return _writeOpTimes;
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index cc1d18e2f30..5a4c9ac1253 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -42,9 +42,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -171,9 +171,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto boolSize = 1;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getUpdate().getCollation() ? 0 : (UpdateOpEntry::kCollationFieldName.size() +
- item.getUpdate().getCollation()->objsize());
+ estSize += !item.getUpdate().getCollation() ? 0
+ : (UpdateOpEntry::kCollationFieldName.size() +
+ item.getUpdate().getCollation()->objsize());
// Add the size of the 'arrayFilters' field, if present.
estSize += !item.getUpdate().getArrayFilters() ? 0 : ([&item]() {
@@ -209,9 +209,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto intSize = 4;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getDelete().getCollation() ? 0 : (DeleteOpEntry::kCollationFieldName.size() +
- item.getDelete().getCollation()->objsize());
+ estSize += !item.getDelete().getCollation() ? 0
+ : (DeleteOpEntry::kCollationFieldName.size() +
+ item.getDelete().getCollation()->objsize());
// Add the size of the 'limit' field.
estSize += DeleteOpEntry::kMultiFieldName.size() + intSize;
@@ -592,7 +592,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
int index = 0;
WriteErrorDetail* lastError = nullptr;
- for (vector<TargetedWrite *>::const_iterator it = targetedBatch.getWrites().begin();
+ for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
it != targetedBatch.getWrites().end();
++it, ++index) {
const TargetedWrite* write = *it;
@@ -766,9 +766,9 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
// Generate the multi-error message below
if (_wcErrors.size() == 1) {
auto status = _wcErrors.front().error.toStatus();
- error->setStatus(
- status.withReason(str::stream() << status.reason() << " at "
- << _wcErrors.front().endpoint.shardName));
+ error->setStatus(status.withReason(str::stream()
+ << status.reason() << " at "
+ << _wcErrors.front().endpoint.shardName));
} else {
StringBuilder msg;
msg << "multiple errors reported : ";
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 46de1d76efb..603cbff2ba2 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -44,14 +44,9 @@ TEST(BatchedCommandRequest, BasicInsert) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "allowImplicitCollectionCreation"
- << false);
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true
+ << "allowImplicitCollectionCreation" << false);
for (auto docSeq : {false, true}) {
const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq));
@@ -70,13 +65,8 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "shardVersion"
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true << "shardVersion"
<< BSON_ARRAY(Timestamp(1, 2) << epoch));
for (auto docSeq : {false, true}) {
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index 0c2396f7499..8a4eae3c278 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -40,8 +40,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using str::stream;
@@ -112,8 +112,8 @@ BSONObj BatchedCommandResponse::toBSON() const {
builder.appendOID(electionId(), const_cast<OID*>(&_electionId));
if (_writeErrorDetails.get()) {
- auto errorMessage =
- [ errorCount = size_t(0), errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [errorCount = size_t(0),
+ errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
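
The lambda hunk above shows the new treatment of init-captures: they wrap inside the capture list instead of the whole introducer being forced onto its own line. A compilable sketch of the same shape, with hypothetical names:

#include <cstddef>
#include <string>

int main() {
    // Init-captures now break inside the brackets; previously the entire
    // "[ errorCount = ..., errorSize = ... ]" introducer floated alone.
    auto errorMessage = [errorCount = std::size_t(0),
                         errorSize = std::size_t(0)](const std::string& raw) mutable {
        ++errorCount;
        errorSize += raw.size();
        return errorSize;
    };
    errorMessage("example");
    return 0;
}
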
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index b34f8cb7770..cd3fc8bcb32 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -58,17 +58,13 @@ TEST(BatchedCommandResponse, Basic) {
BSONObj writeConcernError(
BSON("code" << 8 << "codeName" << ErrorCodes::errorString(ErrorCodes::Error(8)) << "errmsg"
<< "norepl"
- << "errInfo"
- << BSON("a" << 1)));
+ << "errInfo" << BSON("a" << 1)));
BSONObj origResponseObj =
- BSON(BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
- << BatchedCommandResponse::writeErrors()
- << writeErrorsArray
- << BatchedCommandResponse::writeConcernError()
- << writeConcernError
- << "ok"
- << 1.0);
+ BSON(BatchedCommandResponse::n(0)
+ << "opTime" << mongo::Timestamp(1ULL) << BatchedCommandResponse::writeErrors()
+ << writeErrorsArray << BatchedCommandResponse::writeConcernError() << writeConcernError
+ << "ok" << 1.0);
string errMsg;
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.cpp b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
index 097d7143cee..96a6d301c97 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.cpp
+++ b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
@@ -322,9 +322,9 @@ bool isMetadataDifferent(const std::shared_ptr<ChunkManager>& managerA,
}
/**
-* Whether or not the manager/primary pair was changed or refreshed from a previous version
-* of the metadata.
-*/
+ * Whether or not the manager/primary pair was changed or refreshed from a previous version
+ * of the metadata.
+ */
bool wasMetadataRefreshed(const std::shared_ptr<ChunkManager>& managerA,
const std::shared_ptr<Shard>& primaryA,
const std::shared_ptr<ChunkManager>& managerB,
@@ -456,8 +456,9 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
}
// Utility function to target an update by shard key, and to handle any potential error results.
- const auto targetByShardKey = [&collation, this](
- StatusWith<BSONObj> shardKey, StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
+ const auto targetByShardKey = [&collation,
+ this](StatusWith<BSONObj> shardKey,
+ StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
if (!shardKey.isOK()) {
return shardKey.getStatus().withContext(msg);
}
@@ -505,10 +506,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
"collation) or must target a single shard (and have the simple "
"collation), but this update targeted "
<< shardEndPoints.getValue().size()
- << " shards. Update request: "
- << updateDoc.toBSON()
- << ", shard key pattern: "
- << shardKeyPattern.toString()};
+ << " shards. Update request: " << updateDoc.toBSON()
+ << ", shard key pattern: " << shardKeyPattern.toString()};
}
// If the request is {multi:false}, then this is a single op-style update which we are
@@ -567,8 +566,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!cq.isOK()) {
- return cq.getStatus().withContext(str::stream() << "Could not parse delete query "
- << deleteDoc.getQ());
+ return cq.getStatus().withContext(str::stream()
+ << "Could not parse delete query " << deleteDoc.getQ());
}
// Single deletes must target a single shard or be exact-ID.
@@ -580,8 +579,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
"match on _id (and have the collection default collation) or "
"contain the shard key (and have the simple collation). Delete "
"request: "
- << deleteDoc.toBSON()
- << ", shard key pattern: "
+ << deleteDoc.toBSON() << ", shard key pattern: "
<< _routingInfo->cm()->getShardKeyPattern().toString());
}
diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h
index 90fb9fe5b6f..472f7452d75 100644
--- a/src/mongo/scripting/bson_template_evaluator.h
+++ b/src/mongo/scripting/bson_template_evaluator.h
@@ -269,4 +269,4 @@ private:
PseudoRandom rng;
};
-} // end namespace
+} // namespace mongo
diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp
index cebc0281576..6309fb79f35 100644
--- a/src/mongo/scripting/bson_template_evaluator_test.cpp
+++ b/src/mongo/scripting/bson_template_evaluator_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/scripting/bson_template_evaluator.h"
#include "mongo/db/jsobj.h"
+#include "mongo/scripting/bson_template_evaluator.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -95,8 +95,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -123,8 +122,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -141,8 +139,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -192,8 +189,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -220,8 +216,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -240,8 +235,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -445,8 +439,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << 1 << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder6));
BSONObj obj6 = builder6.obj();
ASSERT_EQUALS(obj6.nFields(), 3);
@@ -460,8 +453,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder7));
BSONObj obj7 = builder7.obj();
ASSERT_EQUALS(obj7.nFields(), 3);
@@ -486,8 +478,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -503,8 +494,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -559,9 +549,7 @@ TEST(BSONTemplateEvaluatorTest, CONCAT) {
ASSERT_EQUALS(obj4.nFields(), 3);
expectedObj = BSON("concatField1"
<< "hello world"
- << "middleKey"
- << 1
- << "concatField2"
+ << "middleKey" << 1 << "concatField2"
<< "hello world");
ASSERT_BSONOBJ_EQ(obj4, expectedObj);
@@ -683,8 +671,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj bazObj = BSON("baz" << innerObj);
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << bazObj);
+ << "bar" << bazObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder3));
BSONObj obj3 = builder3.obj();
@@ -705,10 +692,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "bye");
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << barObj4
- << "baz"
- << bazObj4);
+ << "bar" << barObj4 << "baz" << bazObj4);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder4));
BSONObj obj4 = builder4.obj();
@@ -732,8 +716,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "let"
<< "target"
<< "x"
- << "value"
- << innerObj);
+ << "value" << innerObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusBadOperator, t.evaluate(outerObj, builder5));
// Test success for elements in an array that need evaluation
@@ -744,8 +727,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj elem3 = BSON("baz" << 42);
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
+ << "bar" << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess, t.evaluate(outerObj, builder6));
BSONObj obj6 = builder6.obj();
BSONElement obj6_bar = obj6["bar"];
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 93f70408757..d1a97725786 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -240,9 +240,9 @@ void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) {
if (MONGO_FAIL_POINT(mr_killop_test_fp)) {
/* This thread sleep makes the interrupts in the test come in at a time
- * where the js misses the interrupt and throw an exception instead of
- * being interrupted
- */
+ * where the js misses the interrupt and throws an exception instead of
+ * being interrupted
+ */
stdx::this_thread::sleep_for(stdx::chrono::seconds(1));
}
@@ -309,7 +309,7 @@ extern const JSFile utils_sh;
extern const JSFile utils_auth;
extern const JSFile bulk_api;
extern const JSFile error_codes;
-}
+} // namespace JSFiles
void Scope::execCoreFiles() {
execSetup(JSFiles::utils);
diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h
index a1f02724ee2..b50c75baed6 100644
--- a/src/mongo/scripting/engine.h
+++ b/src/mongo/scripting/engine.h
@@ -279,4 +279,4 @@ const char* jsSkipWhiteSpace(const char* raw);
ScriptEngine* getGlobalScriptEngine();
void setGlobalScriptEngine(ScriptEngine* impl);
-}
+} // namespace mongo
diff --git a/src/mongo/scripting/engine_none.cpp b/src/mongo/scripting/engine_none.cpp
index d6297be697a..0262fbf24fe 100644
--- a/src/mongo/scripting/engine_none.cpp
+++ b/src/mongo/scripting/engine_none.cpp
@@ -37,4 +37,4 @@ void ScriptEngine::setup() {
std::string ScriptEngine::getInterpreterVersionString() {
return "";
}
-}
+} // namespace mongo
diff --git a/src/mongo/scripting/mozjs/bson.cpp b/src/mongo/scripting/mozjs/bson.cpp
index abef2b769a0..7972cdbaca0 100644
--- a/src/mongo/scripting/mozjs/bson.cpp
+++ b/src/mongo/scripting/mozjs/bson.cpp
@@ -47,7 +47,9 @@ namespace mozjs {
const char* const BSONInfo::className = "BSON";
const JSFunctionSpec BSONInfo::freeFunctions[3] = {
- MONGO_ATTACH_JS_FUNCTION(bsonWoCompare), MONGO_ATTACH_JS_FUNCTION(bsonBinaryEqual), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(bsonWoCompare),
+ MONGO_ATTACH_JS_FUNCTION(bsonBinaryEqual),
+ JS_FS_END,
};
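
The JSFunctionSpec arrays in this and the following mozjs files already ended in a trailing comma (after JS_FS_END); the newer formatter honors that trailing comma and lays the initializer out one element per line instead of bin-packing it. A minimal sketch with made-up values:

// With a trailing comma present, the braced initializer is formatted one
// element per line rather than packed onto a single line.
static const int kSpecTable[3] = {
    101,
    202,
    0,
};
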
diff --git a/src/mongo/scripting/mozjs/code.cpp b/src/mongo/scripting/mozjs/code.cpp
index ed34739c07e..2744558dffe 100644
--- a/src/mongo/scripting/mozjs/code.cpp
+++ b/src/mongo/scripting/mozjs/code.cpp
@@ -44,7 +44,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec CodeInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, CodeInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, CodeInfo),
+ JS_FS_END,
};
const char* const CodeInfo::className = "Code";
@@ -52,9 +53,9 @@ const char* const CodeInfo::className = "Code";
void CodeInfo::Functions::toString::call(JSContext* cx, JS::CallArgs args) {
ObjectWrapper o(cx, args.thisv());
- std::string str = str::stream() << "Code({\"code\":\"" << o.getString(InternedString::code)
- << "\","
- << "\"scope\":" << o.getObject(InternedString::scope) << "\"})";
+ std::string str = str::stream()
+ << "Code({\"code\":\"" << o.getString(InternedString::code) << "\","
+ << "\"scope\":" << o.getObject(InternedString::scope) << "\"})";
ValueReader(cx, args.rval()).fromStringData(str);
}
diff --git a/src/mongo/scripting/mozjs/cursor_handle.cpp b/src/mongo/scripting/mozjs/cursor_handle.cpp
index 28c7a483936..ee781700357 100644
--- a/src/mongo/scripting/mozjs/cursor_handle.cpp
+++ b/src/mongo/scripting/mozjs/cursor_handle.cpp
@@ -41,7 +41,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec CursorHandleInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(zeroCursorId, CursorHandleInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(zeroCursorId, CursorHandleInfo),
+ JS_FS_END,
};
const char* const CursorHandleInfo::className = "CursorHandle";
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 33ea1308953..358106b0de5 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -62,7 +62,7 @@ namespace mongo {
namespace JSFiles {
extern const JSFile types;
extern const JSFile assert;
-} // namespace
+} // namespace JSFiles
namespace mozjs {
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index e86f0582bb4..2f0a030bde1 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -658,12 +658,11 @@ void MongoBase::Functions::copyDatabaseWithSCRAM::call(JSContext* cx, JS::CallAr
BSONObj saslFirstCommandPrefix =
BSON("copydbsaslstart" << 1 << "fromhost" << fromHost << "fromdb" << fromDb
- << saslCommandMechanismFieldName
- << "SCRAM-SHA-1");
+ << saslCommandMechanismFieldName << "SCRAM-SHA-1");
- BSONObj saslFollowupCommandPrefix = BSON(
- "copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb << "todb" << toDb << "slaveOk"
- << slaveOk);
+ BSONObj saslFollowupCommandPrefix =
+ BSON("copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb << "todb" << toDb
+ << "slaveOk" << slaveOk);
BSONObj saslCommandPrefix = saslFirstCommandPrefix;
BSONObj inputObj = BSON(saslCommandPayloadFieldName << "");
diff --git a/src/mongo/scripting/mozjs/mongohelpers.js b/src/mongo/scripting/mozjs/mongohelpers.js
index a59c7787918..c8edfb23ca6 100644
--- a/src/mongo/scripting/mozjs/mongohelpers.js
+++ b/src/mongo/scripting/mozjs/mongohelpers.js
@@ -35,7 +35,6 @@
exportToMongoHelpers = {
// This function accepts an expression or function body and returns a function definition
'functionExpressionParser': function functionExpressionParser(fnSrc) {
-
// Ensure that a provided expression or function body is not terminated with a ';'.
// This ensures we interpret the input as a single expression, rather than a sequence
// of expressions, and can wrap it in parentheses.
@@ -52,7 +51,7 @@ exportToMongoHelpers = {
} else if (e == 'SyntaxError: return not in function') {
return 'function() { ' + fnSrc + ' }';
} else {
- throw(e);
+ throw (e);
}
}
// Input source is a series of expressions. we should prepend the last one with return
diff --git a/src/mongo/scripting/mozjs/nativefunction.cpp b/src/mongo/scripting/mozjs/nativefunction.cpp
index a23b7174311..135d2600e44 100644
--- a/src/mongo/scripting/mozjs/nativefunction.cpp
+++ b/src/mongo/scripting/mozjs/nativefunction.cpp
@@ -47,7 +47,8 @@ const char* const NativeFunctionInfo::inheritFrom = "Function";
const char* const NativeFunctionInfo::className = "NativeFunction";
const JSFunctionSpec NativeFunctionInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, NativeFunctionInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, NativeFunctionInfo),
+ JS_FS_END,
};
namespace {
diff --git a/src/mongo/scripting/mozjs/object.cpp b/src/mongo/scripting/mozjs/object.cpp
index 3f9c84df90d..ec1de920391 100644
--- a/src/mongo/scripting/mozjs/object.cpp
+++ b/src/mongo/scripting/mozjs/object.cpp
@@ -40,7 +40,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec ObjectInfo::methods[2] = {
- MONGO_ATTACH_JS_FUNCTION(bsonsize), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(bsonsize),
+ JS_FS_END,
};
const char* const ObjectInfo::className = "Object";
diff --git a/src/mongo/scripting/mozjs/objectwrapper.cpp b/src/mongo/scripting/mozjs/objectwrapper.cpp
index 3c57e262029..d934c28ed37 100644
--- a/src/mongo/scripting/mozjs/objectwrapper.cpp
+++ b/src/mongo/scripting/mozjs/objectwrapper.cpp
@@ -615,11 +615,8 @@ BSONObj ObjectWrapper::toBSON() {
const int sizeWithEOO = b.len() + 1 /*EOO*/ - 4 /*BSONObj::Holder ref count*/;
uassert(17260,
str::stream() << "Converting from JavaScript to BSON failed: "
- << "Object size "
- << sizeWithEOO
- << " exceeds limit of "
- << BSONObjMaxInternalSize
- << " bytes.",
+ << "Object size " << sizeWithEOO << " exceeds limit of "
+ << BSONObjMaxInternalSize << " bytes.",
sizeWithEOO <= BSONObjMaxInternalSize);
return b.obj();
diff --git a/src/mongo/scripting/mozjs/regexp.cpp b/src/mongo/scripting/mozjs/regexp.cpp
index b2e4d0b85a7..75d7a7ac915 100644
--- a/src/mongo/scripting/mozjs/regexp.cpp
+++ b/src/mongo/scripting/mozjs/regexp.cpp
@@ -37,7 +37,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec RegExpInfo::methods[2] = {
- MONGO_ATTACH_JS_FUNCTION(toJSON), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(toJSON),
+ JS_FS_END,
};
const char* const RegExpInfo::className = "RegExp";
diff --git a/src/mongo/scripting/mozjs/session.cpp b/src/mongo/scripting/mozjs/session.cpp
index d1617892a4c..7e785888a52 100644
--- a/src/mongo/scripting/mozjs/session.cpp
+++ b/src/mongo/scripting/mozjs/session.cpp
@@ -122,9 +122,7 @@ void endSession(SessionHolder* holder) {
if (holder->txnState == SessionHolder::TransactionState::kActive) {
holder->txnState = SessionHolder::TransactionState::kAborted;
BSONObj abortObj = BSON("abortTransaction" << 1 << "lsid" << holder->lsid << "txnNumber"
- << holder->txnNumber
- << "autocommit"
- << false);
+ << holder->txnNumber << "autocommit" << false);
MONGO_COMPILER_VARIABLE_UNUSED auto ignored =
holder->client->runCommand("admin", abortObj, out);
diff --git a/src/mongo/scripting/mozjs/timestamp.cpp b/src/mongo/scripting/mozjs/timestamp.cpp
index 88f9331bef3..e114535afee 100644
--- a/src/mongo/scripting/mozjs/timestamp.cpp
+++ b/src/mongo/scripting/mozjs/timestamp.cpp
@@ -46,7 +46,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec TimestampInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toJSON, TimestampInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toJSON, TimestampInfo),
+ JS_FS_END,
};
const char* const TimestampInfo::className = "Timestamp";
@@ -62,9 +63,7 @@ double getTimestampArg(JSContext* cx, JS::CallArgs args, int idx, std::string na
if (val < 0 || val > maxArgVal) {
uasserted(ErrorCodes::BadValue,
str::stream() << name << " must be non-negative and not greater than "
- << maxArgVal
- << ", got "
- << val);
+ << maxArgVal << ", got " << val);
}
return val;
}
diff --git a/src/mongo/scripting/mozjs/uri.cpp b/src/mongo/scripting/mozjs/uri.cpp
index f0bacecb92e..fa830bf064d 100644
--- a/src/mongo/scripting/mozjs/uri.cpp
+++ b/src/mongo/scripting/mozjs/uri.cpp
@@ -47,7 +47,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec URIInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, URIInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, URIInfo),
+ JS_FS_END,
};
const char* const URIInfo::className = "MongoURI";
diff --git a/src/mongo/scripting/mozjs/valuewriter.cpp b/src/mongo/scripting/mozjs/valuewriter.cpp
index ef3b0b4d428..f40ef984576 100644
--- a/src/mongo/scripting/mozjs/valuewriter.cpp
+++ b/src/mongo/scripting/mozjs/valuewriter.cpp
@@ -316,8 +316,7 @@ void ValueWriter::_writeObject(BSONObjBuilder* b,
if (scope->getProto<CodeInfo>().getJSClass() == jsclass) {
if (o.hasOwnField(InternedString::scope) // CodeWScope
- &&
- o.type(InternedString::scope) == mongo::Object) {
+ && o.type(InternedString::scope) == mongo::Object) {
if (o.type(InternedString::code) != mongo::String) {
uasserted(ErrorCodes::BadValue, "code must be a string");
}
diff --git a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
index 12a94458896..e5110b1bd1c 100644
--- a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
+++ b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
@@ -94,24 +94,21 @@ bool wrapConstrainedMethod(JSContext* cx, unsigned argc, JS::Value* vp) {
if (!args.thisv().isObject()) {
uasserted(ErrorCodes::BadValue,
- str::stream() << "Cannot call \"" << T::name()
- << "\" on non-object of type \""
- << ValueWriter(cx, args.thisv()).typeAsString()
- << "\"");
+ str::stream()
+ << "Cannot call \"" << T::name() << "\" on non-object of type \""
+ << ValueWriter(cx, args.thisv()).typeAsString() << "\"");
}
if (!instanceOf<Args..., void>(getScope(cx), &isProto, args.thisv())) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on object of type \""
- << ObjectWrapper(cx, args.thisv()).getClassName()
- << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
}
if (noProto && isProto) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on prototype of \""
- << ObjectWrapper(cx, args.thisv()).getClassName()
- << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
}
T::call(cx, args);
diff --git a/src/mongo/scripting/mozjs/wraptype.h b/src/mongo/scripting/mozjs/wraptype.h
index e2ca4b358be..e3e4acde7bd 100644
--- a/src/mongo/scripting/mozjs/wraptype.h
+++ b/src/mongo/scripting/mozjs/wraptype.h
@@ -67,20 +67,22 @@
#define MONGO_ATTACH_JS_FUNCTION(name) MONGO_ATTACH_JS_FUNCTION_WITH_FLAGS(name, 0)
-#define MONGO_ATTACH_JS_CONSTRAINED_METHOD(name, ...) \
- { \
- #name, {smUtils::wrapConstrainedMethod < Functions::name, false, __VA_ARGS__ >, nullptr }, \
- 0, \
- 0, \
- nullptr \
- }
-
-#define MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(name, ...) \
- { \
- #name, {smUtils::wrapConstrainedMethod < Functions::name, true, __VA_ARGS__ >, nullptr }, \
- 0, \
- 0, \
- nullptr \
+#define MONGO_ATTACH_JS_CONSTRAINED_METHOD(name, ...) \
+ { \
+#name, \
+ {smUtils::wrapConstrainedMethod < Functions::name, false, __VA_ARGS__>, nullptr }, \
+ 0, \
+ 0, \
+ nullptr \
+ }
+
+#define MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(name, ...) \
+ { \
+#name, \
+ {smUtils::wrapConstrainedMethod < Functions::name, true, __VA_ARGS__>, nullptr }, \
+ 0, \
+ 0, \
+ nullptr \
}
namespace mongo {
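
The reformatted macros above leave the stringizing #name at column 0. A plausible reading is that the formatter pins any line whose first token is '#' to the left margin, as if it were a preprocessor directive, even inside a macro replacement list where it is actually the stringize operator. A standalone sketch of a macro exercising the same quirk (NAME_AND_VALUE is hypothetical):

#include <cstdio>

// The '#' below is the stringize operator, not a directive: line splicing
// makes it part of the macro body, yet it still sits at column 0.
#define NAME_AND_VALUE(sym) \
#sym, sym

int main() {
    int answer = 42;
    std::printf("%s = %d\n", NAME_AND_VALUE(answer));  // prints: answer = 42
    return 0;
}
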
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index ce2ec257763..20c8f2d481b 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -1,13 +1,13 @@
doassert = function(msg, obj) {
// eval if msg is a function
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg();
- if (typeof(msg) == "object")
+ if (typeof (msg) == "object")
msg = tojson(msg);
if (jsTest.options().traceExceptions) {
- if (typeof(msg) == "string" && msg.indexOf("assert") == 0)
+ if (typeof (msg) == "string" && msg.indexOf("assert") == 0)
print(msg);
else
print("assert: " + msg);
@@ -27,7 +27,6 @@ doassert = function(msg, obj) {
// Sort doc/obj fields and return new sorted obj
sortDoc = function(doc) {
-
// Helper to sort the elements of the array
var sortElementsOfArray = function(arr) {
var newArr = [];
@@ -318,7 +317,7 @@ assert = (function() {
var msgPrefix = "assert.soon failed: " + func;
if (msg) {
- if (typeof(msg) != "function") {
+ if (typeof (msg) != "function") {
msgPrefix = "assert.soon failed, msg";
}
}
@@ -328,7 +327,7 @@ assert = (function() {
interval = interval || 200;
var last;
while (1) {
- if (typeof(func) == "string") {
+ if (typeof (func) == "string") {
if (eval(func))
return;
} else {
@@ -418,7 +417,7 @@ assert = (function() {
var start = new Date();
timeout = timeout || 30000;
- if (typeof(f) == "string") {
+ if (typeof (f) == "string") {
res = eval(f);
} else {
res = f();
@@ -929,7 +928,6 @@ assert = (function() {
};
assert.gleOK = function(res, msg) {
-
var errMsg = null;
if (!res) {
@@ -950,7 +948,7 @@ assert = (function() {
assert.gleSuccess = function(dbOrGLEDoc, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (gle.err) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(msg, "getLastError not null: " + tojson(gle)), gle);
}
@@ -960,7 +958,7 @@ assert = (function() {
assert.gleError = function(dbOrGLEDoc, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(msg, "getLastError is null: " + tojson(gle)));
}
@@ -969,7 +967,7 @@ assert = (function() {
assert.gleErrorCode = function(dbOrGLEDoc, code, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err || gle.code != code) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(
msg,
@@ -980,7 +978,7 @@ assert = (function() {
assert.gleErrorRegex = function(dbOrGLEDoc, regex, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err || !regex.test(gle.err)) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(
msg,
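
On the JavaScript side the same upgrade reads as a formatter behavior change rather than a hand edit: `typeof` is now treated as an operator, so a space is inserted before a parenthesized operand (`typeof (msg)`), `throw` gets the same treatment (`throw (e);`), and blank lines immediately after a function opener are dropped. Presumably this comes from running the new clang-format over the shell's .js files as well; the hunks below return to the C++ patterns already shown.
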
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index cc523d8ac77..f5ca0b2af04 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -924,8 +924,7 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
{
opState.stats->trappedErrors.push_back(
BSON("error" << ex.what() << "op" << kOpTypeNames.find(op.op)->second
- << "count"
- << count));
+ << "count" << count));
}
if (_config->breakOnTrap)
return;
@@ -1040,8 +1039,8 @@ void BenchRunOp::executeOnce(DBClientBase* conn,
boost::none); // lastKnownCommittedOpTime
BSONObj getMoreCommandResult;
uassert(ErrorCodes::CommandFailed,
- str::stream() << "getMore command failed; reply was: "
- << getMoreCommandResult,
+ str::stream()
+ << "getMore command failed; reply was: " << getMoreCommandResult,
runCommandWithSession(conn,
this->ns,
getMoreRequest.toBSON(),
@@ -1390,11 +1389,11 @@ void BenchRunner::start() {
if (_config->username != "") {
std::string errmsg;
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(
- 16704,
- str::stream() << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "required to use benchRun with auth enabled");
+ uasserted(16704,
+ str::stream()
+ << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "required to use benchRun with auth enabled");
}
}
@@ -1429,11 +1428,11 @@ void BenchRunner::stop() {
std::string errmsg;
// this can only fail if admin access was revoked since start of run
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(
- 16705,
- str::stream() << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "still required to use benchRun with auth enabled");
+ uasserted(16705,
+ str::stream()
+ << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "still required to use benchRun with auth enabled");
}
}
}
diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h
index 527beecae49..f73d2149abe 100644
--- a/src/mongo/shell/bench.h
+++ b/src/mongo/shell/bench.h
@@ -45,7 +45,7 @@
namespace pcrecpp {
class RE;
-} // namespace pcrecpp;
+} // namespace pcrecpp
namespace mongo {
@@ -431,9 +431,9 @@ public:
bool shouldWorkerFinish() const;
/**
- * Predicate that workers call to see if they should start collecting stats (as a result
- * of a call to tellWorkersToCollectStats()).
- */
+ * Predicate that workers call to see if they should start collecting stats (as a result
+ * of a call to tellWorkersToCollectStats()).
+ */
bool shouldWorkerCollectStats() const;
/**
diff --git a/src/mongo/shell/bulk_api.js b/src/mongo/shell/bulk_api.js
index eac31f7a871..eac2c063374 100644
--- a/src/mongo/shell/bulk_api.js
+++ b/src/mongo/shell/bulk_api.js
@@ -2,7 +2,6 @@
// Scope for the function
//
var _bulk_api_module = (function() {
-
// Batch types
var NONE = 0;
var INSERT = 1;
@@ -37,7 +36,6 @@ var _bulk_api_module = (function() {
* Accepts { w : x, j : x, wtimeout : x, fsync: x } or w, wtimeout, j
*/
var WriteConcern = function(wValue, wTimeout, jValue) {
-
if (!(this instanceof WriteConcern)) {
var writeConcern = Object.create(WriteConcern.prototype);
WriteConcern.apply(writeConcern, arguments);
@@ -97,7 +95,6 @@ var _bulk_api_module = (function() {
this.shellPrint = function() {
return this.toString();
};
-
};
/**
@@ -107,7 +104,6 @@ var _bulk_api_module = (function() {
* are used to filter the WriteResult to only include relevant result fields.
*/
var WriteResult = function(bulkResult, singleBatchType, writeConcern) {
-
if (!(this instanceof WriteResult))
return new WriteResult(bulkResult, singleBatchType, writeConcern);
@@ -217,7 +213,6 @@ var _bulk_api_module = (function() {
* Wraps the result for the commands
*/
var BulkWriteResult = function(bulkResult, singleBatchType, writeConcern) {
-
if (!(this instanceof BulkWriteResult) && !(this instanceof BulkWriteError))
return new BulkWriteResult(bulkResult, singleBatchType, writeConcern);
@@ -354,7 +349,6 @@ var _bulk_api_module = (function() {
* Represents a bulk write error, identical to a BulkWriteResult but thrown
*/
var BulkWriteError = function(bulkResult, singleBatchType, writeConcern, message) {
-
if (!(this instanceof BulkWriteError))
return new BulkWriteError(bulkResult, singleBatchType, writeConcern, message);
@@ -397,7 +391,6 @@ var _bulk_api_module = (function() {
* Wraps a command error
*/
var WriteCommandError = function(commandError) {
-
if (!(this instanceof WriteCommandError))
return new WriteCommandError(commandError);
@@ -607,7 +600,6 @@ var _bulk_api_module = (function() {
// Add to internal list of documents
var addToOperationsList = function(docType, document) {
-
if (Array.isArray(document))
throw Error("operation passed in cannot be an Array");
@@ -638,7 +630,7 @@ var _bulk_api_module = (function() {
* Otherwise, returns the same object passed.
*/
var addIdIfNeeded = function(obj) {
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
for (var key in tmp) {
@@ -812,7 +804,6 @@ var _bulk_api_module = (function() {
//
// Merge write command result into aggregated results object
var mergeBatchResults = function(batch, bulkResult, result) {
-
// If we have an insert Batch type
if (batch.batchType == INSERT) {
bulkResult.nInserted = bulkResult.nInserted + result.n;
@@ -1009,8 +1000,8 @@ var _bulk_api_module = (function() {
} else if (code == 19900 || // No longer primary
code == 16805 || // replicatedToNum no longer primary
code == 14330 || // gle wmode changed; invalid
- code == NOT_MASTER ||
- code == UNKNOWN_REPL_WRITE_CONCERN || code == WRITE_CONCERN_FAILED) {
+ code == NOT_MASTER || code == UNKNOWN_REPL_WRITE_CONCERN ||
+ code == WRITE_CONCERN_FAILED) {
extractedErr.wcError = {code: code, errmsg: errMsg};
} else if (!isOK) {
// This is a GLE failure we don't understand
@@ -1037,7 +1028,6 @@ var _bulk_api_module = (function() {
// Execute the operations, serially
var executeBatchWithLegacyOps = function(batch) {
-
var batchResult = {n: 0, writeErrors: [], upserted: []};
var extractedErr = null;
@@ -1113,10 +1103,11 @@ var _bulk_api_module = (function() {
bsonWoCompare(writeConcern, {w: 0}) != 0;
extractedErr = null;
- if (needToEnforceWC && (batchResult.writeErrors.length == 0 ||
- (!ordered &&
- // not all errored.
- batchResult.writeErrors.length < batch.operations.length))) {
+ if (needToEnforceWC &&
+ (batchResult.writeErrors.length == 0 ||
+ (!ordered &&
+ // not all errored.
+ batchResult.writeErrors.length < batch.operations.length))) {
// if last write errored
if (batchResult.writeErrors.length > 0 &&
batchResult.writeErrors[batchResult.writeErrors.length - 1].index ==
@@ -1237,7 +1228,6 @@ var _bulk_api_module = (function() {
};
return module;
-
})();
// Globals
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index c6c03b35e46..04aeddea965 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -167,7 +167,7 @@ DBCollection.prototype._makeCommand = function(cmd, params) {
};
DBCollection.prototype._dbCommand = function(cmd, params) {
- if (typeof(cmd) === "object")
+ if (typeof (cmd) === "object")
return this._db._dbCommand(cmd, {}, this.getQueryOptions());
return this._db._dbCommand(this._makeCommand(cmd, params), {}, this.getQueryOptions());
@@ -175,7 +175,7 @@ DBCollection.prototype._dbCommand = function(cmd, params) {
// Like _dbCommand, but applies $readPreference
DBCollection.prototype._dbReadCommand = function(cmd, params) {
- if (typeof(cmd) === "object")
+ if (typeof (cmd) === "object")
return this._db._dbReadCommand(cmd, {}, this.getQueryOptions());
return this._db._dbReadCommand(this._makeCommand(cmd, params), {}, this.getQueryOptions());
@@ -210,7 +210,6 @@ DBCollection.prototype._massageObject = function(q) {
}
throw Error("don't know how to massage : " + type);
-
};
DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
@@ -276,7 +275,7 @@ DBCollection.prototype.insert = function(obj, options) {
var allowDottedFields = false;
if (options === undefined) {
// do nothing
- } else if (typeof(options) == 'object') {
+ } else if (typeof (options) == 'object') {
if (options.ordered === undefined) {
// do nothing, like above
} else {
@@ -299,7 +298,7 @@ DBCollection.prototype.insert = function(obj, options) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
// Bit 1 of option flag is continueOnError. Bit 0 (stop on error) is the default.
@@ -329,7 +328,7 @@ DBCollection.prototype.insert = function(obj, options) {
}
}
} else {
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
for (var key in tmp) {
@@ -361,7 +360,7 @@ DBCollection.prototype._parseRemove = function(t, justOne) {
var wc = undefined;
var collation = undefined;
- if (typeof(justOne) === "object") {
+ if (typeof (justOne) === "object") {
var opts = justOne;
wc = opts.writeConcern;
justOne = opts.justOne;
@@ -390,7 +389,7 @@ DBCollection.prototype.remove = function(t, justOne) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
var bulk = this.initializeOrderedBulkOp();
@@ -452,7 +451,7 @@ DBCollection.prototype._parseUpdate = function(query, updateSpec, upsert, multi)
let hint = undefined;
// can pass options via object for improved readability
- if (typeof(upsert) === "object") {
+ if (typeof (upsert) === "object") {
if (multi) {
throw Error("Fourth argument must be empty when specifying " +
"upsert and multi with an object.");
@@ -502,7 +501,7 @@ DBCollection.prototype.update = function(query, updateSpec, upsert, multi) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
var bulk = this.initializeOrderedBulkOp();
@@ -567,10 +566,10 @@ DBCollection.prototype.save = function(obj, opts) {
if (obj == null)
throw Error("can't save a null");
- if (typeof(obj) == "number" || typeof(obj) == "string")
+ if (typeof (obj) == "number" || typeof (obj) == "string")
throw Error("can't save a number or string");
- if (typeof(obj._id) == "undefined") {
+ if (typeof (obj._id) == "undefined") {
obj._id = new ObjectId();
return this.insert(obj, opts);
} else {
@@ -598,11 +597,11 @@ DBCollection.prototype._indexSpec = function(keys, options) {
var ret = {ns: this._fullName, key: keys, name: this._genIndexName(keys)};
if (!options) {
- } else if (typeof(options) == "string")
+ } else if (typeof (options) == "string")
ret.name = options;
- else if (typeof(options) == "boolean")
+ else if (typeof (options) == "boolean")
ret.unique = true;
- else if (typeof(options) == "object") {
+ else if (typeof (options) == "object") {
if (Array.isArray(options)) {
if (options.length > 3) {
throw new Error("Index options that are supplied in array form may only specify" +
@@ -610,9 +609,9 @@ DBCollection.prototype._indexSpec = function(keys, options) {
}
var nb = 0;
for (var i = 0; i < options.length; i++) {
- if (typeof(options[i]) == "string")
+ if (typeof (options[i]) == "string")
ret.name = options[i];
- else if (typeof(options[i]) == "boolean") {
+ else if (typeof (options[i]) == "boolean") {
if (options[i]) {
if (nb == 0)
ret.unique = true;
@@ -626,7 +625,7 @@ DBCollection.prototype._indexSpec = function(keys, options) {
Object.extend(ret, options);
}
} else {
- throw Error("can't handle: " + typeof(options));
+ throw Error("can't handle: " + typeof (options));
}
return ret;
@@ -780,14 +779,14 @@ DBCollection.prototype._printExtraInfo = function(action, startTime) {
DBCollection.prototype.validate = function(full) {
var cmd = {validate: this.getName()};
- if (typeof(full) == 'object') // support arbitrary options here
+ if (typeof (full) == 'object') // support arbitrary options here
Object.extend(cmd, full);
else
cmd.full = full;
var res = this._db.runCommand(cmd);
- if (typeof(res.valid) == 'undefined') {
+ if (typeof (res.valid) == 'undefined') {
// old-style format just put everything in a string. Now using proper fields
res.valid = false;
@@ -842,7 +841,7 @@ DBCollection.prototype.hashAllDocs = function() {
var res = this._dbCommand(cmd);
var hash = res.collections[this._shortName];
assert(hash);
- assert(typeof(hash) == "string");
+ assert(typeof (hash) == "string");
return hash;
};
@@ -880,14 +879,14 @@ DBCollection.prototype.getCollection = function(subName) {
};
/**
- * scale: The scale at which to deliver results. Unless specified, this command returns all data
- * in bytes.
- * indexDetails: Includes indexDetails field in results. Default: false.
- * indexDetailsKey: If indexDetails is true, filter contents in indexDetails by this index key.
- * indexDetailsname: If indexDetails is true, filter contents in indexDetails by this index name.
- *
- * It is an error to provide both indexDetailsKey and indexDetailsName.
- */
+ * scale: The scale at which to deliver results. Unless specified, this command returns all data
+ * in bytes.
+ * indexDetails: Includes indexDetails field in results. Default: false.
+ * indexDetailsKey: If indexDetails is true, filter contents in indexDetails by this index key.
+ * indexDetailsName: If indexDetails is true, filter contents in indexDetails by this index name.
+ *
+ * It is an error to provide both indexDetailsKey and indexDetailsName.
+ */
DBCollection.prototype.stats = function(args) {
'use strict';
@@ -1040,8 +1039,8 @@ MapReduceResult.prototype.drop = function() {
};
/**
-* just for debugging really
-*/
+ * just for debugging really
+ */
MapReduceResult.prototype.convertToSingleObject = function() {
var z = {};
var it = this.results != null ? this.results : this._coll.find();
@@ -1060,13 +1059,13 @@ DBCollection.prototype.convertToSingleObject = function(valueField) {
};
/**
-* @param optional object of optional fields;
-*/
+ * @param optional object of optional fields;
+ */
DBCollection.prototype.mapReduce = function(map, reduce, optionsOrOutString) {
var c = {mapreduce: this._shortName, map: map, reduce: reduce};
assert(optionsOrOutString, "need to supply an optionsOrOutString");
- if (typeof(optionsOrOutString) == "string")
+ if (typeof (optionsOrOutString) == "string")
c["out"] = optionsOrOutString;
else
Object.extend(c, optionsOrOutString);
@@ -1086,7 +1085,6 @@ DBCollection.prototype.mapReduce = function(map, reduce, optionsOrOutString) {
throw _getErrorWithCode(raw, "map reduce failed:" + tojson(raw));
}
return new MapReduceResult(this._db, raw);
-
};
DBCollection.prototype.toString = function() {
@@ -1142,7 +1140,6 @@ will actually
*/
DBCollection.prototype.getShardDistribution = function() {
-
var stats = this.stats();
if (!stats.sharded) {
@@ -1175,8 +1172,8 @@ DBCollection.prototype.getShardDistribution = function() {
}
print("\nTotals");
- print(" data : " + sh._dataFormat(stats.size) + " docs : " + stats.count + " chunks : " +
- numChunks);
+ print(" data : " + sh._dataFormat(stats.size) + " docs : " + stats.count +
+ " chunks : " + numChunks);
for (var shard in stats.shards) {
var shardStats = stats.shards[shard];
@@ -1186,16 +1183,14 @@ DBCollection.prototype.getShardDistribution = function() {
(stats.count == 0) ? 0 : (Math.floor(shardStats.count / stats.count * 10000) / 100);
print(" Shard " + shard + " contains " + estDataPercent + "% data, " + estDocPercent +
- "% docs in cluster, " + "avg obj size on shard : " +
- sh._dataFormat(stats.shards[shard].avgObjSize));
+ "% docs in cluster, " +
+ "avg obj size on shard : " + sh._dataFormat(stats.shards[shard].avgObjSize));
}
print("\n");
-
};
DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
-
var stats = this.stats();
if (!stats.sharded) {
@@ -1263,7 +1258,6 @@ DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
var admin = this.getDB().getSiblingDB("admin");
var coll = this;
var splitFunction = function() {
-
// Turn off the balancer, just to be safe
print("Turning off balancer...");
config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
@@ -1290,11 +1284,11 @@ DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
};
print("\nGenerated " + numSplits + " split keys, run output function to perform splits.\n" +
- " ex : \n" + " > var splitter = <collection>.getSplitKeysForChunks()\n" +
+ " ex : \n" +
+ " > var splitter = <collection>.getSplitKeysForChunks()\n" +
" > splitter() // Execute splits on cluster !\n");
return splitFunction;
-
};
DBCollection.prototype.setSlaveOk = function(value) {
@@ -1352,21 +1346,21 @@ DBCollection.prototype.unsetWriteConcern = function() {
//
/**
-* Count number of matching documents in the db to a query.
-*
-* @method
-* @param {object} query The query for the count.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.limit=null] The limit of documents to count.
-* @param {number} [options.skip=null] The number of documents to skip for the count.
-* @param {string|object} [options.hint=null] An index name hint or specification for the query.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {string} [options.readConcern=null] The level of readConcern passed to the count command
-* @param {object} [options.collation=null] The collation that should be used for string comparisons
-* for this count op.
-* @return {number}
-*
-*/
+ * Count number of matching documents in the db to a query.
+ *
+ * @method
+ * @param {object} query The query for the count.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.limit=null] The limit of documents to count.
+ * @param {number} [options.skip=null] The number of documents to skip for the count.
+ * @param {string|object} [options.hint=null] An index name hint or specification for the query.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {string} [options.readConcern=null] The level of readConcern passed to the count command
+ * @param {object} [options.collation=null] The collation that should be used for string comparisons
+ * for this count op.
+ * @return {number}
+ *
+ */
DBCollection.prototype.count = function(query, options) {
query = this.find(query);
@@ -1375,19 +1369,19 @@ DBCollection.prototype.count = function(query, options) {
};
/**
-* Count number of matching documents in the db to a query using aggregation.
-*
-* @method
-* @param {object} query The query for the count.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.limit=null] The limit of documents to count.
-* @param {number} [options.skip=null] The number of documents to skip for the count.
-* @param {string|object} [options.hint=null] An index name hint or specification for the query.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {object} [options.collation=null] The collation that should be used for string comparisons
-* for this count op.
-* @return {number}
-*/
+ * Count number of matching documents in the db to a query using aggregation.
+ *
+ * @method
+ * @param {object} query The query for the count.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.limit=null] The limit of documents to count.
+ * @param {number} [options.skip=null] The number of documents to skip for the count.
+ * @param {string|object} [options.hint=null] An index name hint or specification for the query.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {object} [options.collation=null] The collation that should be used for string comparisons
+ * for this count op.
+ * @return {number}
+ */
DBCollection.prototype.countDocuments = function(query, options) {
"use strict";
let pipeline = [{"$match": query}];
@@ -1424,13 +1418,13 @@ DBCollection.prototype.countDocuments = function(query, options) {
};
/**
-* Estimates the count of documents in a collection using collection metadata.
-*
-* @method
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {number}
-*/
+ * Estimates the count of documents in a collection using collection metadata.
+ *
+ * @method
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {number}
+ */
DBCollection.prototype.estimatedDocumentCount = function(options) {
"use strict";
let cmd = {count: this.getName()};
@@ -1452,17 +1446,17 @@ DBCollection.prototype.estimatedDocumentCount = function(options) {
};
/**
-* The distinct command returns returns a list of distinct values for the given key across a
-*collection.
-*
-* @method
-* @param {string} key Field of the document to find distinct values for.
-* @param {object} query The query for filtering the set of documents to which we apply the distinct
-*filter.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {object}
-*/
+ * The distinct command returns a list of distinct values for the given key across a
+ * collection.
+ *
+ * @method
+ * @param {string} key Field of the document to find distinct values for.
+ * @param {object} query The query for filtering the set of documents to which we apply the distinct
+ * filter.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {object}
+ */
DBCollection.prototype.distinct = function(keyString, query, options) {
var opts = Object.extend({}, options || {});
var keyStringType = typeof keyString;
@@ -1576,7 +1570,7 @@ PlanCache.prototype._parseQueryShape = function(query, projection, sort, collati
// Accept query shape object as only argument.
// Query shape must contain 'query', 'projection', and 'sort', and may optionally contain
// 'collation'. 'collation' must be non-empty if present.
- if (typeof(query) == 'object' && projection == undefined && sort == undefined &&
+ if (typeof (query) == 'object' && projection == undefined && sort == undefined &&
collation == undefined) {
var keysSorted = Object.keys(query).sort();
// Expected keys must be sorted for the comparison to work.
diff --git a/src/mongo/shell/crud_api.js b/src/mongo/shell/crud_api.js
index bcd245f4878..dd7d334291a 100644
--- a/src/mongo/shell/crud_api.js
+++ b/src/mongo/shell/crud_api.js
@@ -30,7 +30,7 @@ DBCollection.prototype.addIdIfNeeded = function(obj) {
if (typeof obj !== "object") {
throw new Error('argument passed to addIdIfNeeded is not an object');
}
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
@@ -45,32 +45,32 @@ DBCollection.prototype.addIdIfNeeded = function(obj) {
};
/**
-* Perform a bulkWrite operation without a fluent API
-*
-* Legal operation types are
-*
-* { insertOne: { document: { a: 1 } } }
-*
-* { updateOne: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
-* "fr"}, arrayFilters: [{i: 0}] } }
-*
-* { updateMany: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true collation: {locale:
-* "fr"}, arrayFilters: [{i: 0}] } }
-*
-* { deleteOne: { filter: {c:1}, collation: {locale: "fr"} } }
-*
-* { deleteMany: { filter: {c:1}, collation: {locale: "fr"} } }
-*
-* { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true, collation: {locale: "fr"} } }
-*
-* @method
-* @param {object[]} operations Bulk operations to perform.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Perform a bulkWrite operation without a fluent API
+ *
+ * Legal operation types are
+ *
+ * { insertOne: { document: { a: 1 } } }
+ *
+ * { updateOne: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
+ * "fr"}, arrayFilters: [{i: 0}] } }
+ *
+ * { updateMany: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
+ * "fr"}, arrayFilters: [{i: 0}] } }
+ *
+ * { deleteOne: { filter: {c:1}, collation: {locale: "fr"} } }
+ *
+ * { deleteMany: { filter: {c:1}, collation: {locale: "fr"} } }
+ *
+ * { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true, collation: {locale: "fr"} } }
+ *
+ * @method
+ * @param {object[]} operations Bulk operations to perform.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
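+ *
+ * A minimal usage sketch; the 'items' collection and documents are illustrative:
+ * @example
+ *     db.items.bulkWrite([
+ *         {insertOne: {document: {a: 1}}},
+ *         {updateOne: {filter: {a: 2}, update: {$set: {b: 3}}, upsert: true}},
+ *         {deleteMany: {filter: {stale: true}}}
+ *     ], {ordered: false});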
+ */
DBCollection.prototype.bulkWrite = function(operations, options) {
var opts = Object.extend({}, options || {});
opts.ordered = (typeof opts.ordered == 'boolean') ? opts.ordered : true;
@@ -221,16 +221,16 @@ DBCollection.prototype.bulkWrite = function(operations, options) {
};
/**
-* Inserts a single document into MongoDB.
-*
-* @method
-* @param {object} doc Document to insert.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Inserts a single document into MongoDB.
+ *
+ * @method
+ * @param {object} doc Document to insert.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
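+ *
+ * A minimal usage sketch; the 'users' collection and document are illustrative:
+ * @example
+ *     // An _id is generated client-side when the document lacks one.
+ *     db.users.insertOne({name: "alice"}, {w: "majority", wtimeout: 5000});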
+ */
DBCollection.prototype.insertOne = function(document, options) {
var opts = Object.extend({}, options || {});
@@ -276,17 +276,17 @@ DBCollection.prototype.insertOne = function(document, options) {
};
/**
-* Inserts an array of documents into MongoDB.
-*
-* @method
-* @param {object[]} docs Documents to insert.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @param {boolean} [options.ordered=true] Execute inserts in ordered or unordered fashion.
-* @return {object}
-*/
+ * Inserts an array of documents into MongoDB.
+ *
+ * @method
+ * @param {object[]} docs Documents to insert.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @param {boolean} [options.ordered=true] Execute inserts in ordered or unordered fashion.
+ * @return {object}
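+ *
+ * A minimal usage sketch; the 'users' collection and documents are illustrative:
+ * @example
+ *     // Unordered execution continues past individual insert errors.
+ *     db.users.insertMany([{name: "alice"}, {name: "bob"}], {ordered: false});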
+ */
DBCollection.prototype.insertMany = function(documents, options) {
var opts = Object.extend({}, options || {});
opts.ordered = (typeof opts.ordered == 'boolean') ? opts.ordered : true;
@@ -327,16 +327,16 @@ DBCollection.prototype.insertMany = function(documents, options) {
};
/**
-* Delete a document on MongoDB
-*
-* @method
-* @param {object} filter The filter used to select the document to remove
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Delete a document on MongoDB
+ *
+ * @method
+ * @param {object} filter The filter used to select the document to remove
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
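+ *
+ * A minimal usage sketch; the collection and filter are illustrative:
+ * @example
+ *     db.users.deleteOne({name: "alice"}, {w: "majority"});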
+ */
DBCollection.prototype.deleteOne = function(filter, options) {
var opts = Object.extend({}, options || {});
@@ -384,16 +384,16 @@ DBCollection.prototype.deleteOne = function(filter, options) {
};
/**
-* Delete multiple documents on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the documents to remove
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Delete multiple documents on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the documents to remove
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
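+ *
+ * A minimal usage sketch; the collection and filter are illustrative:
+ * @example
+ *     db.users.deleteMany({active: false});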
+ */
DBCollection.prototype.deleteMany = function(filter, options) {
var opts = Object.extend({}, options || {});
@@ -441,18 +441,18 @@ DBCollection.prototype.deleteMany = function(filter, options) {
};
/**
-* Replace a document on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} doc The Document that replaces the matching document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Replace a document on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} doc The Document that replaces the matching document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
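+ *
+ * A minimal usage sketch; the collection and documents are illustrative:
+ * @example
+ *     // The replacement is a full document, not an update-operator expression.
+ *     db.users.replaceOne({name: "alice"}, {name: "alice", level: 2}, {upsert: true});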
+ */
DBCollection.prototype.replaceOne = function(filter, replacement, options) {
var opts = Object.extend({}, options || {});
@@ -521,18 +521,18 @@ DBCollection.prototype.replaceOne = function(filter, replacement, options) {
};
/**
-* Update a single document on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} update The update operations to be applied to the document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Update a single document on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} update The update operations to be applied to the document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
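+ *
+ * A minimal usage sketch; the collection and fields are illustrative:
+ * @example
+ *     // The update document must use operators such as $set.
+ *     db.users.updateOne({name: "alice"}, {$set: {level: 2}}, {upsert: true});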
+ */
DBCollection.prototype.updateOne = function(filter, update, options) {
var opts = Object.extend({}, options || {});
@@ -607,18 +607,18 @@ DBCollection.prototype.updateOne = function(filter, update, options) {
};
/**
-* Update multiple documents on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} update The update operations to be applied to the document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Update multiple documents on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} update The update operations to be applied to the document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
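+ *
+ * A minimal usage sketch; the collection and fields are illustrative:
+ * @example
+ *     db.users.updateMany({active: false}, {$set: {archived: true}});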
+ */
DBCollection.prototype.updateMany = function(filter, update, options) {
var opts = Object.extend({}, options || {});
@@ -693,18 +693,18 @@ DBCollection.prototype.updateMany = function(filter, update, options) {
};
/**
-* Find a document and delete it in one atomic operation,
-* requires a write lock for the duration of the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {object}
-*/
+ * Find a document and delete it in one atomic operation;
+ * requires a write lock for the duration of the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {object}
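+ *
+ * A minimal usage sketch; the 'jobs' collection and fields are illustrative:
+ * @example
+ *     // Delete the oldest queued job and return it.
+ *     db.jobs.findOneAndDelete({state: "queued"}, {sort: {created: 1}});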
+ */
DBCollection.prototype.findOneAndDelete = function(filter, options) {
var opts = Object.extend({}, options || {});
// Set up the command
@@ -739,22 +739,22 @@ DBCollection.prototype.findOneAndDelete = function(filter, options) {
};
/**
-* Find a document and replace it in one atomic operation, requires a write lock for the duration of
-*the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} replacement Document replacing the matching document.
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
-* @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
-*than the original. The default is false.
-* @return {object}
-*/
+ * Find a document and replace it in one atomic operation; requires a write lock for the duration of
+ *the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} replacement Document replacing the matching document.
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
+ * @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
+ *than the original. The default is false.
+ * @return {object}
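+ *
+ * A minimal usage sketch; the 'jobs' collection and documents are illustrative:
+ * @example
+ *     // Returns the post-replacement document because returnNewDocument is true.
+ *     db.jobs.findOneAndReplace({_id: 1}, {state: "done"}, {returnNewDocument: true});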
+ */
DBCollection.prototype.findOneAndReplace = function(filter, replacement, options) {
var opts = Object.extend({}, options || {});
@@ -805,22 +805,22 @@ DBCollection.prototype.findOneAndReplace = function(filter, replacement, options
};
/**
-* Find a document and update it in one atomic operation, requires a write lock for the duration of
-*the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} update Update operations to be performed on the document
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
-* @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
-*than the original. The default is false.
-* @return {object}
-*/
+ * Find a document and update it in one atomic operation; requires a write lock for the duration of
+ *the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} update Update operations to be performed on the document
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
+ * @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
+ *than the original. The default is false.
+ * @return {object}
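+ *
+ * A minimal usage sketch; the 'jobs' collection and fields are illustrative:
+ * @example
+ *     // Atomically claim the oldest queued job and return the updated document.
+ *     db.jobs.findOneAndUpdate({state: "queued"},
+ *                              {$set: {state: "running"}},
+ *                              {sort: {created: 1}, returnNewDocument: true});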
+ */
DBCollection.prototype.findOneAndUpdate = function(filter, update, options) {
var opts = Object.extend({}, options || {});
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
index a309bda09d2..afc6e5357ed 100644
--- a/src/mongo/shell/db.js
+++ b/src/mongo/shell/db.js
@@ -4,1854 +4,1838 @@ var DB;
(function() {
- var _defaultWriteConcern = {w: 'majority', wtimeout: 10 * 60 * 1000};
+var _defaultWriteConcern = {w: 'majority', wtimeout: 10 * 60 * 1000};
- if (DB === undefined) {
- DB = function(mongo, name) {
- this._mongo = mongo;
- this._name = name;
- };
- }
-
- DB.prototype.getMongo = function() {
- assert(this._mongo, "why no mongo!");
- return this._mongo;
+if (DB === undefined) {
+ DB = function(mongo, name) {
+ this._mongo = mongo;
+ this._name = name;
};
+}
- DB.prototype.getSiblingDB = function(name) {
- return this.getSession().getDatabase(name);
- };
+DB.prototype.getMongo = function() {
+ assert(this._mongo, "why no mongo!");
+ return this._mongo;
+};
- DB.prototype.getSisterDB = DB.prototype.getSiblingDB;
+DB.prototype.getSiblingDB = function(name) {
+ return this.getSession().getDatabase(name);
+};
- DB.prototype.getName = function() {
- return this._name;
- };
+DB.prototype.getSisterDB = DB.prototype.getSiblingDB;
- DB.prototype.stats = function(scale) {
- return this.runCommand({dbstats: 1, scale: scale});
- };
+DB.prototype.getName = function() {
+ return this._name;
+};
- DB.prototype.getCollection = function(name) {
- return new DBCollection(this._mongo, this, name, this._name + "." + name);
- };
+DB.prototype.stats = function(scale) {
+ return this.runCommand({dbstats: 1, scale: scale});
+};
- DB.prototype.commandHelp = function(name) {
- var c = {};
- c[name] = 1;
- c.help = true;
- var res = this.runCommand(c);
- if (!res.ok)
- throw _getErrorWithCode(res, res.errmsg);
- return res.help;
- };
+DB.prototype.getCollection = function(name) {
+ return new DBCollection(this._mongo, this, name, this._name + "." + name);
+};
- // utility to attach readPreference if needed.
- DB.prototype._attachReadPreferenceToCommand = function(cmdObj, readPref) {
- "use strict";
- // if the user has not set a readpref, return the original cmdObj
- if ((readPref === null) || typeof(readPref) !== "object") {
- return cmdObj;
- }
-
- // if user specifies $readPreference manually, then don't change it
- if (cmdObj.hasOwnProperty("$readPreference")) {
- return cmdObj;
- }
+DB.prototype.commandHelp = function(name) {
+ var c = {};
+ c[name] = 1;
+ c.help = true;
+ var res = this.runCommand(c);
+ if (!res.ok)
+ throw _getErrorWithCode(res, res.errmsg);
+ return res.help;
+};
+
+// utility to attach readPreference if needed.
+DB.prototype._attachReadPreferenceToCommand = function(cmdObj, readPref) {
+ "use strict";
+ // if the user has not set a readpref, return the original cmdObj
+ if ((readPref === null) || typeof (readPref) !== "object") {
+ return cmdObj;
+ }
- // copy object so we don't mutate the original
- var clonedCmdObj = Object.extend({}, cmdObj);
- // The server selection spec mandates that the key is '$query', but
- // the shell has historically used 'query'. The server accepts both,
- // so we maintain the existing behavior
- var cmdObjWithReadPref = {query: clonedCmdObj, $readPreference: readPref};
- return cmdObjWithReadPref;
- };
+ // if user specifies $readPreference manually, then don't change it
+ if (cmdObj.hasOwnProperty("$readPreference")) {
+ return cmdObj;
+ }
- /**
- * If someone passes i.e. runCommand("foo", {bar: "baz"}), we merge it in to
- * runCommand({foo: 1, bar: "baz"}).
- * If we already have a command object in the first argument, we ensure that the second
- * argument 'extraKeys' is either null or an empty object. This prevents users from accidentally
- * calling runCommand({foo: 1}, {bar: 1}) and expecting the final command invocation to be
- * runCommand({foo: 1, bar: 1}).
- * This helper abstracts that logic.
- */
- DB.prototype._mergeCommandOptions = function(obj, extraKeys) {
- "use strict";
-
- if (typeof(obj) === "object") {
- if (Object.keys(extraKeys || {}).length > 0) {
- throw Error("Unexpected second argument to DB.runCommand(): (type: " +
- typeof(extraKeys) + "): " + tojson(extraKeys));
- }
- return obj;
- } else if (typeof(obj) !== "string") {
- throw Error("First argument to DB.runCommand() must be either an object or a string: " +
- "(type: " + typeof(obj) + "): " + tojson(obj));
- }
+ // copy object so we don't mutate the original
+ var clonedCmdObj = Object.extend({}, cmdObj);
+ // The server selection spec mandates that the key is '$query', but
+ // the shell has historically used 'query'. The server accepts both,
+ // so we maintain the existing behavior
+ var cmdObjWithReadPref = {query: clonedCmdObj, $readPreference: readPref};
+ return cmdObjWithReadPref;
+};
+
+/**
+ * If someone passes e.g. runCommand("foo", {bar: "baz"}), we merge it into
+ * runCommand({foo: 1, bar: "baz"}).
+ * If we already have a command object in the first argument, we ensure that the second
+ * argument 'extraKeys' is either null or an empty object. This prevents users from accidentally
+ * calling runCommand({foo: 1}, {bar: 1}) and expecting the final command invocation to be
+ * runCommand({foo: 1, bar: 1}).
+ * This helper abstracts that logic.
+ */
+DB.prototype._mergeCommandOptions = function(obj, extraKeys) {
+ "use strict";
+
+ if (typeof (obj) === "object") {
+ if (Object.keys(extraKeys || {}).length > 0) {
+ throw Error("Unexpected second argument to DB.runCommand(): (type: " +
+ typeof (extraKeys) + "): " + tojson(extraKeys));
+ }
+ return obj;
+ } else if (typeof (obj) !== "string") {
+ throw Error("First argument to DB.runCommand() must be either an object or a string: " +
+ "(type: " + typeof (obj) + "): " + tojson(obj));
+ }
- var commandName = obj;
- var mergedCmdObj = {};
- mergedCmdObj[commandName] = 1;
-
- if (!extraKeys) {
- return mergedCmdObj;
- } else if (typeof(extraKeys) === "object") {
- // this will traverse the prototype chain of extra, but keeping
- // to maintain legacy behavior
- for (var key in extraKeys) {
- mergedCmdObj[key] = extraKeys[key];
- }
- } else {
- throw Error("Second argument to DB.runCommand(" + commandName +
- ") must be an object: (type: " + typeof(extraKeys) + "): " +
- tojson(extraKeys));
- }
+ var commandName = obj;
+ var mergedCmdObj = {};
+ mergedCmdObj[commandName] = 1;
+ if (!extraKeys) {
return mergedCmdObj;
- };
-
- // Like runCommand but applies readPreference if one has been set
- // on the connection. Also sets slaveOk if a (non-primary) readPref has been set.
- DB.prototype.runReadCommand = function(obj, extra, queryOptions) {
- "use strict";
-
- // Support users who call this function with a string commandName, e.g.
- // db.runReadCommand("commandName", {arg1: "value", arg2: "value"}).
- obj = this._mergeCommandOptions(obj, extra);
- queryOptions = queryOptions !== undefined ? queryOptions : this.getQueryOptions();
+ } else if (typeof (extraKeys) === "object") {
+ // this will traverse the prototype chain of extra, but keeping
+ // to maintain legacy behavior
+ for (var key in extraKeys) {
+ mergedCmdObj[key] = extraKeys[key];
+ }
+ } else {
+ throw Error("Second argument to DB.runCommand(" + commandName +
+ ") must be an object: (type: " + typeof (extraKeys) +
+ "): " + tojson(extraKeys));
+ }
- {
- const session = this.getSession();
+ return mergedCmdObj;
+};
- const readPreference = session._getSessionAwareClient().getReadPreference(session);
- if (readPreference !== null) {
- obj = this._attachReadPreferenceToCommand(obj, readPreference);
+// Like runCommand but applies readPreference if one has been set
+// on the connection. Also sets slaveOk if a (non-primary) readPref has been set.
+DB.prototype.runReadCommand = function(obj, extra, queryOptions) {
+ "use strict";
- if (readPreference.mode !== "primary") {
- // Set slaveOk if readPrefMode has been explicitly set with a readPreference
- // other than primary.
- queryOptions |= 4;
- }
- }
- }
+ // Support users who call this function with a string commandName, e.g.
+ // db.runReadCommand("commandName", {arg1: "value", arg2: "value"}).
+ obj = this._mergeCommandOptions(obj, extra);
+ queryOptions = queryOptions !== undefined ? queryOptions : this.getQueryOptions();
- // The 'extra' parameter is not used as we have already created a merged command object.
- return this.runCommand(obj, null, queryOptions);
- };
-
- // runCommand uses this impl to actually execute the command
- DB.prototype._runCommandImpl = function(name, obj, options) {
+ {
const session = this.getSession();
- return session._getSessionAwareClient().runCommand(session, name, obj, options);
- };
- DB.prototype.runCommand = function(obj, extra, queryOptions) {
- "use strict";
+ const readPreference = session._getSessionAwareClient().getReadPreference(session);
+ if (readPreference !== null) {
+ obj = this._attachReadPreferenceToCommand(obj, readPreference);
- // Support users who call this function with a string commandName, e.g.
- // db.runCommand("commandName", {arg1: "value", arg2: "value"}).
- var mergedObj = this._mergeCommandOptions(obj, extra);
-
- // if options were passed (i.e. because they were overridden on a collection), use them.
- // Otherwise use getQueryOptions.
- var options =
- (typeof(queryOptions) !== "undefined") ? queryOptions : this.getQueryOptions();
-
- try {
- return this._runCommandImpl(this._name, mergedObj, options);
- } catch (ex) {
- // When runCommand flowed through query, a connection error resulted in the message
- // "error doing query: failed". Even though this message is arguably incorrect
- // for a command failing due to a connection failure, we preserve it for backwards
- // compatibility. See SERVER-18334 for details.
- if (ex.message.indexOf("network error") >= 0) {
- throw new Error("error doing query: failed: " + ex.message);
+ if (readPreference.mode !== "primary") {
+ // Set slaveOk if readPrefMode has been explicitly set with a readPreference
+ // other than primary.
+ queryOptions |= 4;
}
- throw ex;
}
- };
-
- DB.prototype.runCommandWithMetadata = function(commandArgs, metadata) {
- const session = this.getSession();
- return session._getSessionAwareClient().runCommandWithMetadata(
- session, this._name, metadata, commandArgs);
- };
+ }
- DB.prototype._dbCommand = DB.prototype.runCommand;
- DB.prototype._dbReadCommand = DB.prototype.runReadCommand;
+ // The 'extra' parameter is not used as we have already created a merged command object.
+ return this.runCommand(obj, null, queryOptions);
+};
+
+// runCommand uses this impl to actually execute the command
+DB.prototype._runCommandImpl = function(name, obj, options) {
+ const session = this.getSession();
+ return session._getSessionAwareClient().runCommand(session, name, obj, options);
+};
+
+DB.prototype.runCommand = function(obj, extra, queryOptions) {
+ "use strict";
+
+ // Support users who call this function with a string commandName, e.g.
+ // db.runCommand("commandName", {arg1: "value", arg2: "value"}).
+ var mergedObj = this._mergeCommandOptions(obj, extra);
+
+ // if options were passed (i.e. because they were overridden on a collection), use them.
+ // Otherwise use getQueryOptions.
+ var options = (typeof (queryOptions) !== "undefined") ? queryOptions : this.getQueryOptions();
+
+ try {
+ return this._runCommandImpl(this._name, mergedObj, options);
+ } catch (ex) {
+ // When runCommand flowed through query, a connection error resulted in the message
+ // "error doing query: failed". Even though this message is arguably incorrect
+ // for a command failing due to a connection failure, we preserve it for backwards
+ // compatibility. See SERVER-18334 for details.
+ if (ex.message.indexOf("network error") >= 0) {
+ throw new Error("error doing query: failed: " + ex.message);
+ }
+ throw ex;
+ }
+};
- DB.prototype.adminCommand = function(obj, extra) {
- if (this._name == "admin")
- return this.runCommand(obj, extra);
- return this.getSiblingDB("admin").runCommand(obj, extra);
- };
+DB.prototype.runCommandWithMetadata = function(commandArgs, metadata) {
+ const session = this.getSession();
+ return session._getSessionAwareClient().runCommandWithMetadata(
+ session, this._name, metadata, commandArgs);
+};
- DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
+DB.prototype._dbCommand = DB.prototype.runCommand;
+DB.prototype._dbReadCommand = DB.prototype.runReadCommand;
- DB.prototype._runAggregate = function(cmdObj, aggregateOptions) {
- assert(cmdObj.pipeline instanceof Array, "cmdObj must contain a 'pipeline' array");
- assert(cmdObj.aggregate !== undefined, "cmdObj must contain 'aggregate' field");
- assert(aggregateOptions === undefined || aggregateOptions instanceof Object,
- "'aggregateOptions' argument must be an object");
+DB.prototype.adminCommand = function(obj, extra) {
+ if (this._name == "admin")
+ return this.runCommand(obj, extra);
+ return this.getSiblingDB("admin").runCommand(obj, extra);
+};
- // Make a copy of the initial command object, i.e. {aggregate: x, pipeline: [...]}.
- cmdObj = Object.extend({}, cmdObj);
+DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
- // Make a copy of the aggregation options.
- let optcpy = Object.extend({}, (aggregateOptions || {}));
+DB.prototype._runAggregate = function(cmdObj, aggregateOptions) {
+ assert(cmdObj.pipeline instanceof Array, "cmdObj must contain a 'pipeline' array");
+ assert(cmdObj.aggregate !== undefined, "cmdObj must contain 'aggregate' field");
+ assert(aggregateOptions === undefined || aggregateOptions instanceof Object,
+ "'aggregateOptions' argument must be an object");
- if ('batchSize' in optcpy) {
- if (optcpy.cursor == null) {
- optcpy.cursor = {};
- }
+ // Make a copy of the initial command object, i.e. {aggregate: x, pipeline: [...]}.
+ cmdObj = Object.extend({}, cmdObj);
- optcpy.cursor.batchSize = optcpy['batchSize'];
- delete optcpy['batchSize'];
- } else if ('useCursor' in optcpy) {
- if (optcpy.cursor == null) {
- optcpy.cursor = {};
- }
+ // Make a copy of the aggregation options.
+ let optcpy = Object.extend({}, (aggregateOptions || {}));
- delete optcpy['useCursor'];
+ if ('batchSize' in optcpy) {
+ if (optcpy.cursor == null) {
+ optcpy.cursor = {};
}
- const maxAwaitTimeMS = optcpy.maxAwaitTimeMS;
- delete optcpy.maxAwaitTimeMS;
-
- // Reassign the cleaned-up options.
- aggregateOptions = optcpy;
-
- // Add the options to the command object.
- Object.extend(cmdObj, aggregateOptions);
-
- if (!('cursor' in cmdObj)) {
- cmdObj.cursor = {};
+ optcpy.cursor.batchSize = optcpy['batchSize'];
+ delete optcpy['batchSize'];
+ } else if ('useCursor' in optcpy) {
+ if (optcpy.cursor == null) {
+ optcpy.cursor = {};
}
- const pipeline = cmdObj.pipeline;
-
- // Check whether the pipeline has a stage which performs writes like $out. If not, we may
- // run on a Secondary and should attach a readPreference.
- const hasWritingStage = (function() {
- if (pipeline.length == 0) {
- return false;
- }
- const lastStage = pipeline[pipeline.length - 1];
- return lastStage.hasOwnProperty("$out") || lastStage.hasOwnProperty("$merge");
- }());
-
- const doAgg = function(cmdObj) {
- return hasWritingStage ? this.runCommand(cmdObj) : this.runReadCommand(cmdObj);
- }.bind(this);
-
- const res = doAgg(cmdObj);
+ delete optcpy['useCursor'];
+ }
- if (!res.ok && (res.code == 17020 || res.errmsg == "unrecognized field \"cursor") &&
- !("cursor" in aggregateOptions)) {
- // If the command failed because cursors aren't supported and the user didn't explicitly
- // request a cursor, try again without requesting a cursor.
- delete cmdObj.cursor;
+ const maxAwaitTimeMS = optcpy.maxAwaitTimeMS;
+ delete optcpy.maxAwaitTimeMS;
- res = doAgg(cmdObj);
+ // Reassign the cleaned-up options.
+ aggregateOptions = optcpy;
- if ('result' in res && !("cursor" in res)) {
- // convert old-style output to cursor-style output
- res.cursor = {ns: '', id: NumberLong(0)};
- res.cursor.firstBatch = res.result;
- delete res.result;
- }
- }
+ // Add the options to the command object.
+ Object.extend(cmdObj, aggregateOptions);
- assert.commandWorked(res, "aggregate failed");
-
- if ("cursor" in res) {
- let batchSizeValue = undefined;
+ if (!('cursor' in cmdObj)) {
+ cmdObj.cursor = {};
+ }
- if (cmdObj["cursor"]["batchSize"] > 0) {
- batchSizeValue = cmdObj["cursor"]["batchSize"];
- }
+ const pipeline = cmdObj.pipeline;
- return new DBCommandCursor(this, res, batchSizeValue, maxAwaitTimeMS);
+ // Check whether the pipeline has a stage which performs writes like $out. If not, we may
+ // run on a Secondary and should attach a readPreference.
+ const hasWritingStage = (function() {
+ if (pipeline.length == 0) {
+ return false;
}
+ const lastStage = pipeline[pipeline.length - 1];
+ return lastStage.hasOwnProperty("$out") || lastStage.hasOwnProperty("$merge");
+ }());
- return res;
- };
-
- DB.prototype.aggregate = function(pipeline, aggregateOptions) {
- assert(pipeline instanceof Array, "pipeline argument must be an array");
- const cmdObj = this._mergeCommandOptions("aggregate", {pipeline: pipeline});
-
- return this._runAggregate(cmdObj, (aggregateOptions || {}));
- };
-
- /**
- Create a new collection in the database. Normally, collection creation is automatic. You
- would
- use this function if you wish to specify special options on creation.
-
- If the collection already exists, no action occurs.
-
- <p>Options:</p>
- <ul>
- <li>
- size: desired initial extent size for the collection. Must be <= 1000000000.
- for fixed size (capped) collections, this size is the total/max size of the
- collection.
- </li>
- <li>
- capped: if true, this is a capped collection (where old data rolls out).
- </li>
- <li> max: maximum number of objects if capped (optional).</li>
- <li>
- storageEngine: BSON document containing storage engine specific options. Format:
- {
- storageEngine: {
- storageEngine1: {
- ...
- },
- storageEngine2: {
- ...
- },
- ...
- }
- }
- </li>
- </ul>
+ const doAgg = function(cmdObj) {
+ return hasWritingStage ? this.runCommand(cmdObj) : this.runReadCommand(cmdObj);
+ }.bind(this);
- <p>Example:</p>
- <code>db.createCollection("movies", { size: 10 * 1024 * 1024, capped:true } );</code>
+ let res = doAgg(cmdObj);  // 'let', not 'const': reassigned below on the no-cursor retry path.
- * @param {String} name Name of new collection to create
- * @param {Object} options Object with options for call. Options are listed above.
- * @return {Object} returned has member ok set to true if operation succeeds, false otherwise.
- */
- DB.prototype.createCollection = function(name, opt) {
- var options = opt || {};
+ if (!res.ok && (res.code == 17020 || res.errmsg == "unrecognized field \"cursor") &&
+ !("cursor" in aggregateOptions)) {
+ // If the command failed because cursors aren't supported and the user didn't explicitly
+ // request a cursor, try again without requesting a cursor.
+ delete cmdObj.cursor;
- var cmd = {create: name};
- Object.extend(cmd, options);
-
- return this._dbCommand(cmd);
- };
+ res = doAgg(cmdObj);
- /**
- * Command to create a view based on the specified aggregation pipeline.
- * Usage: db.createView(name, viewOn, pipeline: [{ $operator: {...}}, ... ])
- *
- * @param name String - name of the new view to create
- * @param viewOn String - name of the backing view or collection
- * @param pipeline [{ $operator: {...}}, ... ] - the aggregation pipeline that defines the view
- * @param options { } - options on the view, e.g., collations
- */
- DB.prototype.createView = function(name, viewOn, pipeline, opt) {
- var options = opt || {};
-
- var cmd = {create: name};
-
- if (viewOn == undefined) {
- throw Error("Must specify a backing view or collection");
+ if ('result' in res && !("cursor" in res)) {
+ // convert old-style output to cursor-style output
+ res.cursor = {ns: '', id: NumberLong(0)};
+ res.cursor.firstBatch = res.result;
+ delete res.result;
}
+ }
- // Since we allow a single stage pipeline to be specified as an object
- // in aggregation, we need to account for that here for consistency.
- if (pipeline != undefined) {
- if (!Array.isArray(pipeline)) {
- pipeline = [pipeline];
- }
- }
- options.pipeline = pipeline;
- options.viewOn = viewOn;
-
- Object.extend(cmd, options);
+ assert.commandWorked(res, "aggregate failed");
- return this._dbCommand(cmd);
- };
+ if ("cursor" in res) {
+ let batchSizeValue = undefined;
- /**
- * @deprecated use getProfilingStatus
- * Returns the current profiling level of this database
- * @return SOMETHING_FIXME or null on error
- */
- DB.prototype.getProfilingLevel = function() {
- var res = assert.commandWorked(this._dbCommand({profile: -1}));
- return res ? res.was : null;
- };
+ if (cmdObj["cursor"]["batchSize"] > 0) {
+ batchSizeValue = cmdObj["cursor"]["batchSize"];
+ }
- /**
- * @return the current profiling status
- * example { was : 0, slowms : 100 }
- * @return SOMETHING_FIXME or null on error
- */
- DB.prototype.getProfilingStatus = function() {
- var res = this._dbCommand({profile: -1});
- if (!res.ok)
- throw _getErrorWithCode(res, "profile command failed: " + tojson(res));
- delete res.ok;
- return res;
- };
+ return new DBCommandCursor(this, res, batchSizeValue, maxAwaitTimeMS);
+ }
- /**
- * Erase the entire database.
- * @params writeConcern: (document) expresses the write concern of the drop command.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- */
- DB.prototype.dropDatabase = function(writeConcern) {
- return this._dbCommand(
- {dropDatabase: 1, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern});
- };
+ return res;
+};
+
+DB.prototype.aggregate = function(pipeline, aggregateOptions) {
+ assert(pipeline instanceof Array, "pipeline argument must be an array");
+ const cmdObj = this._mergeCommandOptions("aggregate", {pipeline: pipeline});
+
+ return this._runAggregate(cmdObj, (aggregateOptions || {}));
+};
+
+/**
+ Create a new collection in the database. Normally, collection creation is automatic. You
+ would use this function if you wish to specify special options on creation.
+
+ If the collection already exists, no action occurs.
+
+ <p>Options:</p>
+ <ul>
+ <li>
+ size: desired initial extent size for the collection. Must be <= 1000000000.
+ for fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ </li>
+ <li>
+ capped: if true, this is a capped collection (where old data rolls out).
+ </li>
+ <li> max: maximum number of objects if capped (optional).</li>
+ <li>
+ storageEngine: BSON document containing storage engine specific options. Format:
+ {
+ storageEngine: {
+ storageEngine1: {
+ ...
+ },
+ storageEngine2: {
+ ...
+ },
+ ...
+ }
+ }
+ </li>
+ </ul>
+
+ <p>Example:</p>
+ <code>db.createCollection("movies", { size: 10 * 1024 * 1024, capped:true } );</code>
+
+ * @param {String} name Name of new collection to create
+ * @param {Object} options Object with options for call. Options are listed above.
+ * @return {Object} returned has member ok set to true if operation succeeds, false otherwise.
+*/
+DB.prototype.createCollection = function(name, opt) {
+ var options = opt || {};
+
+ var cmd = {create: name};
+ Object.extend(cmd, options);
+
+ return this._dbCommand(cmd);
+};
+
+/**
+ * Command to create a view based on the specified aggregation pipeline.
+ * Usage: db.createView(name, viewOn, pipeline: [{ $operator: {...}}, ... ])
+ *
+ * @param name String - name of the new view to create
+ * @param viewOn String - name of the backing view or collection
+ * @param pipeline [{ $operator: {...}}, ... ] - the aggregation pipeline that defines the view
+ * @param options { } - options on the view, e.g., collations
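+ *
+ * A minimal usage sketch; the view name, source collection, and pipeline are illustrative:
+ * @example
+ *     db.createView("adults", "users", [{$match: {age: {$gte: 18}}}]);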
+ */
+DB.prototype.createView = function(name, viewOn, pipeline, opt) {
+ var options = opt || {};
+
+ var cmd = {create: name};
+
+ if (viewOn == undefined) {
+ throw Error("Must specify a backing view or collection");
+ }
- /**
- * Shuts down the database. Must be run while using the admin database.
- * @param opts Options for shutdown. Possible options are:
- * - force: (boolean) if the server should shut down, even if there is no
- * up-to-date slave
- * - timeoutSecs: (number) the server will continue checking over timeoutSecs
- * if any other servers have caught up enough for it to shut down.
- */
- DB.prototype.shutdownServer = function(opts) {
- if ("admin" != this._name) {
- return "shutdown command only works with the admin database; try 'use admin'";
+ // Since we allow a single stage pipeline to be specified as an object
+ // in aggregation, we need to account for that here for consistency.
+ if (pipeline != undefined) {
+ if (!Array.isArray(pipeline)) {
+ pipeline = [pipeline];
}
+ }
+ options.pipeline = pipeline;
+ options.viewOn = viewOn;
+
+ Object.extend(cmd, options);
+
+ return this._dbCommand(cmd);
+};
+
+/**
+ * @deprecated use getProfilingStatus
+ * Returns the current profiling level of this database
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingLevel = function() {
+ var res = assert.commandWorked(this._dbCommand({profile: -1}));
+ return res ? res.was : null;
+};
+
+/**
+ * @return the current profiling status
+ * example { was : 0, slowms : 100 }
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingStatus = function() {
+ var res = this._dbCommand({profile: -1});
+ if (!res.ok)
+ throw _getErrorWithCode(res, "profile command failed: " + tojson(res));
+ delete res.ok;
+ return res;
+};
+
+/**
+ * Erase the entire database.
+ * @param writeConcern: (document) expresses the write concern of the drop command.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ */
+DB.prototype.dropDatabase = function(writeConcern) {
+ return this._dbCommand(
+ {dropDatabase: 1, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern});
+};
+
+/**
+ * Shuts down the database. Must be run while using the admin database.
+ * @param opts Options for shutdown. Possible options are:
+ * - force: (boolean) if the server should shut down, even if there is no
+ * up-to-date slave
+ * - timeoutSecs: (number) the server will continue checking over timeoutSecs
+ * if any other servers have caught up enough for it to shut down.
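+ *
+ * A minimal usage sketch; the timeout value is illustrative:
+ * @example
+ *     db.getSiblingDB("admin").shutdownServer({timeoutSecs: 60});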
+ */
+DB.prototype.shutdownServer = function(opts) {
+ if ("admin" != this._name) {
+ return "shutdown command only works with the admin database; try 'use admin'";
+ }
- var cmd = {'shutdown': 1};
- opts = opts || {};
- for (var o in opts) {
- cmd[o] = opts[o];
- }
+ var cmd = {'shutdown': 1};
+ opts = opts || {};
+ for (var o in opts) {
+ cmd[o] = opts[o];
+ }
- try {
- var res = this.runCommand(cmd);
- if (!res.ok) {
- throw _getErrorWithCode(res, 'shutdownServer failed: ' + tojson(res));
- }
- throw Error('shutdownServer failed: server is still up.');
- } catch (e) {
- // we expect the command to not return a response, as the server will shut down
- // immediately.
- if (isNetworkError(e)) {
- print('server should be down...');
- return;
- }
- throw e;
+ try {
+ var res = this.runCommand(cmd);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, 'shutdownServer failed: ' + tojson(res));
+ }
+ throw Error('shutdownServer failed: server is still up.');
+ } catch (e) {
+ // we expect the command to not return a response, as the server will shut down
+ // immediately.
+ if (isNetworkError(e)) {
+ print('server should be down...');
+ return;
}
- };
+ throw e;
+ }
+};
+
+/**
+ Clone database on another server to here. This functionality was removed as of MongoDB 4.2.
+ The shell helper is kept to maintain compatibility with previous versions of MongoDB.
+ <p>
+ Generally, you should dropDatabase() first as otherwise the cloned information will MERGE
+ into whatever data is already present in this database. (That is however a valid way to use
+ clone if you are trying to do something intentionally, such as union three non-overlapping
+ databases into one.)
+ <p>
+ This is a low-level administrative function that is not typically used.
+
+ * @param {String} from Where to clone from (dbhostname[:port]). May not be this database
+ (self) as you cannot clone to yourself.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.copyDatabase()
+ */
+DB.prototype.cloneDatabase = function(from) {
+ print(
+ "WARNING: db.cloneDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
+ assert(isString(from) && from.length);
+ return this._dbCommand({clone: from});
+};
+
+/**
+ Copy database from one server or name to another server or name. This functionality was
+ removed as of MongoDB 4.2. The shell helper is kept to maintain compatibility with previous
+ versions of MongoDB.
+
+ Generally, you should dropDatabase() first as otherwise the copied information will MERGE
+ into whatever data is already present in this database (and you will get duplicate objects
+ in collections potentially.)
+
+ For security reasons this function only works when executed on the "admin" db. However,
+ if you have access to said db, you can copy any database from one place to another.
+
+ This method provides a way to "rename" a database by copying it to a new db name and
+ location. Additionally, it effectively provides a repair facility.
+
+ * @param {String} fromdb database name from which to copy.
+ * @param {String} todb database name to copy to.
+ * @param {String} fromhost hostname of the database (and optionally, ":port") from which to
+ copy the data. default if unspecified is to copy from self.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.clone()
+*/
+DB.prototype.copyDatabase = function(
+ fromdb, todb, fromhost, username, password, mechanism, slaveOk) {
+ print(
+ "WARNING: db.copyDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
+ assert(isString(fromdb) && fromdb.length);
+ assert(isString(todb) && todb.length);
+ fromhost = fromhost || "";
+ if ((typeof username === "boolean") && (typeof password === "undefined") &&
+ (typeof mechanism === "undefined") && (typeof slaveOk === "undefined")) {
+ slaveOk = username;
+ username = undefined;
+ }
+ if (typeof slaveOk !== "boolean") {
+ slaveOk = false;
+ }
- /**
- Clone database on another server to here. This functionality was removed as of MongoDB 4.2.
- The shell helper is kept to maintain compatibility with previous versions of MongoDB.
- <p>
- Generally, you should dropDatabase() first as otherwise the cloned information will MERGE
- into whatever data is already present in this database. (That is however a valid way to use
- clone if you are trying to do something intentionally, such as union three non-overlapping
- databases into one.)
- <p>
- This is a low level administrative function will is not typically used.
-
- * @param {String} from Where to clone from (dbhostname[:port]). May not be this database
- (self) as you cannot clone to yourself.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- * See also: db.copyDatabase()
- */
- DB.prototype.cloneDatabase = function(from) {
- print(
- "WARNING: db.cloneDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
- assert(isString(from) && from.length);
- return this._dbCommand({clone: from});
- };
+ if (!mechanism) {
+ mechanism = this._getDefaultAuthenticationMechanism(username, fromdb);
+ }
+ assert(mechanism == "SCRAM-SHA-1" || mechanism == "SCRAM-SHA-256" || mechanism == "MONGODB-CR");
- /**
- Copy database from one server or name to another server or name. This functionality was
- removed as of MongoDB 4.2. The shell helper is kept to maintain compatibility with previous
- versions of MongoDB.
-
- Generally, you should dropDatabase() first as otherwise the copied information will MERGE
- into whatever data is already present in this database (and you will get duplicate objects
- in collections potentially.)
-
- For security reasons this function only works when executed on the "admin" db. However,
- if you have access to said db, you can copy any database from one place to another.
-
- This method provides a way to "rename" a database by copying it to a new db name and
- location. Additionally, it effectively provides a repair facility.
-
- * @param {String} fromdb database name from which to copy.
- * @param {String} todb database name to copy to.
- * @param {String} fromhost hostname of the database (and optionally, ":port") from which to
- copy the data. default if unspecified is to copy from self.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- * See also: db.clone()
- */
- DB.prototype.copyDatabase = function(
- fromdb, todb, fromhost, username, password, mechanism, slaveOk) {
- print(
- "WARNING: db.copyDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
- assert(isString(fromdb) && fromdb.length);
- assert(isString(todb) && todb.length);
- fromhost = fromhost || "";
- if ((typeof username === "boolean") && (typeof password === "undefined") &&
- (typeof mechanism === "undefined") && (typeof slaveOk === "undefined")) {
- slaveOk = username;
- username = undefined;
- }
- if (typeof slaveOk !== "boolean") {
- slaveOk = false;
- }
+ // Check for no auth or copying from localhost
+ if (!username || !password || fromhost == "") {
+ return this._adminCommand(
+ {copydb: 1, fromhost: fromhost, fromdb: fromdb, todb: todb, slaveOk: slaveOk});
+ }
- if (!mechanism) {
- mechanism = this._getDefaultAuthenticationMechanism(username, fromdb);
- }
- assert(mechanism == "SCRAM-SHA-1" || mechanism == "SCRAM-SHA-256" ||
- mechanism == "MONGODB-CR");
+ // Use the copyDatabase native helper for SCRAM-SHA-1/256
+ if (mechanism != "MONGODB-CR") {
+ // TODO SERVER-30886: Add session support for Mongo.prototype.copyDatabaseWithSCRAM().
+ return this.getMongo().copyDatabaseWithSCRAM(
+ fromdb, todb, fromhost, username, password, slaveOk);
+ }
- // Check for no auth or copying from localhost
- if (!username || !password || fromhost == "") {
- return this._adminCommand(
- {copydb: 1, fromhost: fromhost, fromdb: fromdb, todb: todb, slaveOk: slaveOk});
+ // Fall back to MONGODB-CR
+ var n = assert.commandWorked(this._adminCommand({copydbgetnonce: 1, fromhost: fromhost}));
+ return this._adminCommand({
+ copydb: 1,
+ fromhost: fromhost,
+ fromdb: fromdb,
+ todb: todb,
+ username: username,
+ nonce: n.nonce,
+ key: this.__pwHash(n.nonce, username, password),
+ slaveOk: slaveOk,
+ });
+};
+
+DB.prototype.help = function() {
+ print("DB methods:");
+ print(
+ "\tdb.adminCommand(nameOrDocument) - switches to 'admin' db, and runs command [just calls db.runCommand(...)]");
+ print(
+ "\tdb.aggregate([pipeline], {options}) - performs a collectionless aggregation on this database; returns a cursor");
+ print("\tdb.auth(username, password)");
+ print("\tdb.cloneDatabase(fromhost) - will only function with MongoDB 4.0 and below");
+ print("\tdb.commandHelp(name) returns the help for the command");
+ print(
+ "\tdb.copyDatabase(fromdb, todb, fromhost) - will only function with MongoDB 4.0 and below");
+ print("\tdb.createCollection(name, {size: ..., capped: ..., max: ...})");
+ print("\tdb.createUser(userDocument)");
+ print("\tdb.createView(name, viewOn, [{$operator: {...}}, ...], {viewOptions})");
+ print("\tdb.currentOp() displays currently executing operations in the db");
+ print("\tdb.dropDatabase(writeConcern)");
+ print("\tdb.dropUser(username)");
+ print("\tdb.eval() - deprecated");
+ print("\tdb.fsyncLock() flush data to disk and lock server for backups");
+ print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
+ print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
+ print("\tdb.getCollectionInfos([filter]) - returns a list that contains the names and options" +
+ " of the db's collections");
+ print("\tdb.getCollectionNames()");
+ print("\tdb.getLastError() - just returns the err msg string");
+ print("\tdb.getLastErrorObj() - return full status object");
+ print("\tdb.getLogComponents()");
+ print("\tdb.getMongo() get the server connection object");
+ print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
+ print("\tdb.getName()");
+ print("\tdb.getProfilingLevel() - deprecated");
+ print("\tdb.getProfilingStatus() - returns if profiling is on and slow threshold");
+ print("\tdb.getReplicationInfo()");
+ print("\tdb.getSiblingDB(name) get the db at the same server as this one");
+ print(
+ "\tdb.getWriteConcern() - returns the write concern used for any operations on this db, inherited from server object if set");
+ print("\tdb.hostInfo() get details about the server's host");
+ print("\tdb.isMaster() check replica primary status");
+ print("\tdb.killOp(opid) kills the current operation in the db");
+ print("\tdb.listCommands() lists all the db commands");
+ print("\tdb.loadServerScripts() loads all the scripts in db.system.js");
+ print("\tdb.logout()");
+ print("\tdb.printCollectionStats()");
+ print("\tdb.printReplicationInfo()");
+ print("\tdb.printShardingStatus()");
+ print("\tdb.printSlaveReplicationInfo()");
+ print("\tdb.resetError()");
+ print(
+ "\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into {cmdObj: 1}");
+ print("\tdb.serverStatus()");
+ print("\tdb.setLogLevel(level,<component>)");
+ print("\tdb.setProfilingLevel(level,slowms) 0=off 1=slow 2=all");
+ print("\tdb.setVerboseShell(flag) display extra information in shell output");
+ print(
+ "\tdb.setWriteConcern(<write concern doc>) - sets the write concern for writes to the db");
+ print("\tdb.shutdownServer()");
+ print("\tdb.stats()");
+ print(
+ "\tdb.unsetWriteConcern(<write concern doc>) - unsets the write concern for writes to the db");
+ print("\tdb.version() current version of the server");
+ print("\tdb.watch() - opens a change stream cursor for a database to report on all " +
+ " changes to its non-system collections.");
+ return __magicNoPrint;
+};
+
+DB.prototype.printCollectionStats = function(scale) {
+ if (arguments.length > 1) {
+ print("printCollectionStats() has a single optional argument (scale)");
+ return;
+ }
+ if (typeof scale != 'undefined') {
+ if (typeof scale != 'number') {
+ print("scale has to be a number >= 1");
+ return;
}
-
- // Use the copyDatabase native helper for SCRAM-SHA-1/256
- if (mechanism != "MONGODB-CR") {
- // TODO SERVER-30886: Add session support for Mongo.prototype.copyDatabaseWithSCRAM().
- return this.getMongo().copyDatabaseWithSCRAM(
- fromdb, todb, fromhost, username, password, slaveOk);
+ if (scale < 1) {
+ print("scale has to be >= 1");
+ return;
}
+ }
+ var mydb = this;
+ this.getCollectionNames().forEach(function(z) {
+ print(z);
+ printjson(mydb.getCollection(z).stats(scale));
+ print("---");
+ });
+};
+
+/**
+ * Configures settings for capturing operations inside the system.profile collection and in the
+ * slow query log.
+ *
+ * The 'level' can be 0, 1, or 2:
+ * - 0 means that profiling is off and nothing will be written to system.profile.
+ * - 1 means that profiling is on for operations slower than the currently configured 'slowms'
+ * threshold (more on 'slowms' below).
+ * - 2 means that profiling is on for all operations, regardless of whether or not they are
+ * slower than 'slowms'.
+ *
+ * The 'options' parameter, if a number, is interpreted as the 'slowms' value to send to the
+ * server. 'slowms' determines the threshold, in milliseconds, above which slow operations get
+ * profiled at profiling level 1 or logged at logLevel 0.
+ *
+ * If 'options' is not a number, it is expected to be an object containing additional parameters
+ * to get passed to the server. For example, db.setProfilingLevel(2, {foo: "bar"}) will issue
+ * the command {profile: 2, foo: "bar"} to the server.
+ */
+DB.prototype.setProfilingLevel = function(level, options) {
+ if (level < 0 || level > 2) {
+ var errorText = "input level " + level + " is out of range [0..2]";
+ var errorObject = new Error(errorText);
+ errorObject['dbSetProfilingException'] = errorText;
+ throw errorObject;
+ }
- // Fall back to MONGODB-CR
- var n = assert.commandWorked(this._adminCommand({copydbgetnonce: 1, fromhost: fromhost}));
- return this._adminCommand({
- copydb: 1,
- fromhost: fromhost,
- fromdb: fromdb,
- todb: todb,
- username: username,
- nonce: n.nonce,
- key: this.__pwHash(n.nonce, username, password),
- slaveOk: slaveOk,
- });
- };
-
- DB.prototype.help = function() {
- print("DB methods:");
- print(
- "\tdb.adminCommand(nameOrDocument) - switches to 'admin' db, and runs command [just calls db.runCommand(...)]");
- print(
- "\tdb.aggregate([pipeline], {options}) - performs a collectionless aggregation on this database; returns a cursor");
- print("\tdb.auth(username, password)");
- print("\tdb.cloneDatabase(fromhost) - will only function with MongoDB 4.0 and below");
- print("\tdb.commandHelp(name) returns the help for the command");
- print(
- "\tdb.copyDatabase(fromdb, todb, fromhost) - will only function with MongoDB 4.0 and below");
- print("\tdb.createCollection(name, {size: ..., capped: ..., max: ...})");
- print("\tdb.createUser(userDocument)");
- print("\tdb.createView(name, viewOn, [{$operator: {...}}, ...], {viewOptions})");
- print("\tdb.currentOp() displays currently executing operations in the db");
- print("\tdb.dropDatabase(writeConcern)");
- print("\tdb.dropUser(username)");
- print("\tdb.eval() - deprecated");
- print("\tdb.fsyncLock() flush data to disk and lock server for backups");
- print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
- print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
- print(
- "\tdb.getCollectionInfos([filter]) - returns a list that contains the names and options" +
- " of the db's collections");
- print("\tdb.getCollectionNames()");
- print("\tdb.getLastError() - just returns the err msg string");
- print("\tdb.getLastErrorObj() - return full status object");
- print("\tdb.getLogComponents()");
- print("\tdb.getMongo() get the server connection object");
- print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
- print("\tdb.getName()");
- print("\tdb.getProfilingLevel() - deprecated");
- print("\tdb.getProfilingStatus() - returns if profiling is on and slow threshold");
- print("\tdb.getReplicationInfo()");
- print("\tdb.getSiblingDB(name) get the db at the same server as this one");
- print(
- "\tdb.getWriteConcern() - returns the write concern used for any operations on this db, inherited from server object if set");
- print("\tdb.hostInfo() get details about the server's host");
- print("\tdb.isMaster() check replica primary status");
- print("\tdb.killOp(opid) kills the current operation in the db");
- print("\tdb.listCommands() lists all the db commands");
- print("\tdb.loadServerScripts() loads all the scripts in db.system.js");
- print("\tdb.logout()");
- print("\tdb.printCollectionStats()");
- print("\tdb.printReplicationInfo()");
- print("\tdb.printShardingStatus()");
- print("\tdb.printSlaveReplicationInfo()");
- print("\tdb.resetError()");
- print(
- "\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into {cmdObj: 1}");
- print("\tdb.serverStatus()");
- print("\tdb.setLogLevel(level,<component>)");
- print("\tdb.setProfilingLevel(level,slowms) 0=off 1=slow 2=all");
- print("\tdb.setVerboseShell(flag) display extra information in shell output");
- print(
- "\tdb.setWriteConcern(<write concern doc>) - sets the write concern for writes to the db");
- print("\tdb.shutdownServer()");
- print("\tdb.stats()");
- print(
- "\tdb.unsetWriteConcern(<write concern doc>) - unsets the write concern for writes to the db");
- print("\tdb.version() current version of the server");
- print("\tdb.watch() - opens a change stream cursor for a database to report on all " +
- " changes to its non-system collections.");
- return __magicNoPrint;
- };
+ var cmd = {profile: level};
+ if (isNumber(options)) {
+ cmd.slowms = options;
+ } else {
+ cmd = Object.extend(cmd, options);
+ }
+ return assert.commandWorked(this._dbCommand(cmd));
+};
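// [Editor's illustration -- not part of this patch] A minimal usage sketch of the
// levels documented above, assuming a shell connected to a running mongod:
db.setProfilingLevel(1, 100);           // level 1: profile ops slower than 100 ms
db.setProfilingLevel(2);                // level 2: profile every operation
db.setProfilingLevel(1, {slowms: 20});  // an options object is merged into the command
db.setProfilingLevel(0);                // level 0: profiling off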
+
+/**
+ * @deprecated
+ * <p> Evaluate a js expression at the database server.</p>
+ *
+ * <p>Useful if you need to touch a lot of data lightly; in such a scenario
+ * the network transfer of the data could be a bottleneck. A good example
+ * is "select count(*)" -- can be done server side via this mechanism.
+ * </p>
+ *
+ * <p>
+ * If the eval fails, an exception is thrown of the form:
+ * </p>
+ * <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg:
+ * str] } }</code>
+ *
+ * <p>Example: </p>
+ * <code>print("mycount: " + db.eval(function() { return db.mycoll.find().length(); }));</code>
+ *
+ * @param {Function} jsfunction JavaScript function to run on the server. Note this is not a
+ * closure, but rather just "code".
+ * @return the result of your function, or null on error
+ *
+ */
+DB.prototype.eval = function(jsfunction) {
+ print("WARNING: db.eval is deprecated");
+
+ var cmd = {$eval: jsfunction};
+ if (arguments.length > 1) {
+ cmd.args = Array.from(arguments).slice(1);
+ }
- DB.prototype.printCollectionStats = function(scale) {
- if (arguments.length > 1) {
- print("printCollectionStats() has a single optional argument (scale)");
- return;
- }
- if (typeof scale != 'undefined') {
- if (typeof scale != 'number') {
- print("scale has to be a number >= 1");
- return;
- }
- if (scale < 1) {
- print("scale has to be >= 1");
- return;
+ var res = this._dbCommand(cmd);
+
+ if (!res.ok)
+ throw _getErrorWithCode(res, tojson(res));
+
+ return res.retval;
+};
+
+DB.prototype.dbEval = DB.prototype.eval;
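// [Editor's illustration -- not part of this patch] A sketch of the deprecated
// db.eval() helper; "mycoll" is a hypothetical collection, and the underlying $eval
// command is unavailable on newer servers:
var n = db.eval(function(coll) {
    return db[coll].count();  // runs server side; extra args arrive as parameters
}, "mycoll");
print("mycount: " + n);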
+
+/**
+ * <p>
+ * An array of grouped items is returned. The array must fit in RAM, thus this function is not
+ * suitable when the return set is extremely large.
+ * </p>
+ * <p>
+ * To order the grouped data, simply sort it client side upon return.
+ * </p>
+ * <p>
+ * Defaults: cond may be null to run against all documents in the collection. keyf is a
+ * function that takes an object and returns the desired key; set either key or keyf
+ * (not both).
+ * </p>
+ */
+DB.prototype.groupeval = function(parmsObj) {
+ var groupFunction = function() {
+ var parms = args[0];
+ var c = db[parms.ns].find(parms.cond || {});
+ var map = new Map();
+ var pks = parms.key ? Object.keySet(parms.key) : null;
+ var pkl = pks ? pks.length : 0;
+ var key = {};
+
+ while (c.hasNext()) {
+ var obj = c.next();
+ if (pks) {
+ for (var i = 0; i < pkl; i++) {
+ var k = pks[i];
+ key[k] = obj[k];
+ }
+ } else {
+ key = parms.$keyf(obj);
}
- }
- var mydb = this;
- this.getCollectionNames().forEach(function(z) {
- print(z);
- printjson(mydb.getCollection(z).stats(scale));
- print("---");
- });
- };
- /**
- * Configures settings for capturing operations inside the system.profile collection and in the
- * slow query log.
- *
- * The 'level' can be 0, 1, or 2:
- * - 0 means that profiling is off and nothing will be written to system.profile.
- * - 1 means that profiling is on for operations slower than the currently configured 'slowms'
- * threshold (more on 'slowms' below).
- * - 2 means that profiling is on for all operations, regardless of whether or not they are
- * slower than 'slowms'.
- *
- * The 'options' parameter, if a number, is interpreted as the 'slowms' value to send to the
- * server. 'slowms' determines the threshold, in milliseconds, above which slow operations get
- * profiled at profiling level 1 or logged at logLevel 0.
- *
- * If 'options' is not a number, it is expected to be an object containing additional parameters
- * to get passed to the server. For example, db.setProfilingLevel(2, {foo: "bar"}) will issue
- * the command {profile: 2, foo: "bar"} to the server.
- */
- DB.prototype.setProfilingLevel = function(level, options) {
- if (level < 0 || level > 2) {
- var errorText = "input level " + level + " is out of range [0..2]";
- var errorObject = new Error(errorText);
- errorObject['dbSetProfilingException'] = errorText;
- throw errorObject;
+ var aggObj = map.get(key);
+ if (aggObj == null) {
+ var newObj = Object.extend({}, key); // clone
+ aggObj = Object.extend(newObj, parms.initial);
+ map.put(key, aggObj);
+ }
+ parms.$reduce(obj, aggObj);
}
- var cmd = {profile: level};
- if (isNumber(options)) {
- cmd.slowms = options;
- } else {
- cmd = Object.extend(cmd, options);
- }
- return assert.commandWorked(this._dbCommand(cmd));
+ return map.values();
};
- /**
- * @deprecated
- * <p> Evaluate a js expression at the database server.</p>
- *
- * <p>Useful if you need to touch a lot of data lightly; in such a scenario
- * the network transfer of the data could be a bottleneck. A good example
- * is "select count(*)" -- can be done server side via this mechanism.
- * </p>
- *
- * <p>
- * If the eval fails, an exception is thrown of the form:
- * </p>
- * <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg:
- *str] } }</code>
- *
- * <p>Example: </p>
- * <code>print( "mycount: " + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();}
- *);</code>
- *
- * @param {Function} jsfunction Javascript function to run on server. Note this it not a
- *closure, but rather just "code".
- * @return result of your function, or null if error
- *
- */
- DB.prototype.eval = function(jsfunction) {
- print("WARNING: db.eval is deprecated");
-
- var cmd = {$eval: jsfunction};
- if (arguments.length > 1) {
- cmd.args = Array.from(arguments).slice(1);
- }
+ return this.eval(groupFunction, this._groupFixParms(parmsObj));
+};
- var res = this._dbCommand(cmd);
+DB.prototype._groupFixParms = function(parmsObj) {
+ var parms = Object.extend({}, parmsObj);
- if (!res.ok)
- throw _getErrorWithCode(res, tojson(res));
+ if (parms.reduce) {
+ parms.$reduce = parms.reduce; // must have $ to pass to db
+ delete parms.reduce;
+ }
- return res.retval;
- };
+ if (parms.keyf) {
+ parms.$keyf = parms.keyf;
+ delete parms.keyf;
+ }
- DB.prototype.dbEval = DB.prototype.eval;
-
- /**
- * <p>
- * An array of grouped items is returned. The array must fit in RAM, thus this function is not
- * suitable when the return set is extremely large.
- * </p>
- * <p>
- * To order the grouped data, simply sort it client side upon return.
- * <p>
- Defaults
- cond may be null if you want to run against all rows in the collection
- keyf is a function which takes an object and returns the desired key. set either key or
- keyf (not both).
- * </p>
- */
- DB.prototype.groupeval = function(parmsObj) {
-
- var groupFunction = function() {
- var parms = args[0];
- var c = db[parms.ns].find(parms.cond || {});
- var map = new Map();
- var pks = parms.key ? Object.keySet(parms.key) : null;
- var pkl = pks ? pks.length : 0;
- var key = {};
-
- while (c.hasNext()) {
- var obj = c.next();
- if (pks) {
- for (var i = 0; i < pkl; i++) {
- var k = pks[i];
- key[k] = obj[k];
- }
- } else {
- key = parms.$keyf(obj);
- }
+ return parms;
+};
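// [Editor's illustration -- not part of this patch] A sketch of a groupeval()
// parameter object ("orders" and its fields are hypothetical). _groupFixParms
// renames reduce/keyf to $reduce/$keyf before the object is shipped via db.eval():
var groups = db.groupeval({
    ns: "orders",                                   // collection to scan
    cond: {archived: false},                        // may be null to scan everything
    key: {status: 1},                               // or supply keyf, not both
    initial: {count: 0},                            // template for each group's doc
    reduce: function(doc, agg) { agg.count += 1; }  // folds one doc into its group
});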
+
+DB.prototype.resetError = function() {
+ return this.runCommand({reseterror: 1});
+};
+
+DB.prototype.forceError = function() {
+ return this.runCommand({forceerror: 1});
+};
+
+DB.prototype.getLastError = function(w, wtimeout) {
+ var res = this.getLastErrorObj(w, wtimeout);
+ if (!res.ok)
+        throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
+ return res.err;
+};
+DB.prototype.getLastErrorObj = function(w, wtimeout, j) {
+ var cmd = {getlasterror: 1};
+ if (w) {
+ cmd.w = w;
+ if (wtimeout)
+ cmd.wtimeout = wtimeout;
+ if (j != null)
+ cmd.j = j;
+ }
+ var res = this.runCommand(cmd);
+
+ if (!res.ok)
+ throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
+ return res;
+};
+DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;
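// [Editor's illustration -- not part of this patch] A legacy write-concern check,
// assuming a replica set and a server that still answers getlasterror:
db.mycoll.insert({x: 1});
var gle = db.getLastErrorObj(2, 5000);  // wait for w: 2 with a 5000 ms timeout
if (gle.err) print("write failed: " + gle.err);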
+
+DB.prototype._getCollectionInfosCommand = function(
+ filter, nameOnly = false, authorizedCollections = false, options = {}) {
+ filter = filter || {};
+ const cmd = {
+ listCollections: 1,
+ filter: filter,
+ nameOnly: nameOnly,
+ authorizedCollections: authorizedCollections
+ };
+
+ const res = this.runCommand(Object.merge(cmd, options));
+ if (!res.ok) {
+ throw _getErrorWithCode(res, "listCollections failed: " + tojson(res));
+ }
- var aggObj = map.get(key);
- if (aggObj == null) {
- var newObj = Object.extend({}, key); // clone
- aggObj = Object.extend(newObj, parms.initial);
- map.put(key, aggObj);
- }
- parms.$reduce(obj, aggObj);
- }
+ return new DBCommandCursor(this, res).toArray().sort(compareOn("name"));
+};
- return map.values();
- };
+DB.prototype._getCollectionInfosFromPrivileges = function() {
+ let ret = this.runCommand({connectionStatus: 1, showPrivileges: 1});
+ if (!ret.ok) {
+        throw _getErrorWithCode(ret, "Failed to acquire collection information from privileges");
+ }
- return this.eval(groupFunction, this._groupFixParms(parmsObj));
- };
+ // Parse apart collection information.
+ let result = [];
- DB.prototype._groupFixParms = function(parmsObj) {
- var parms = Object.extend({}, parmsObj);
+ let privileges = ret.authInfo.authenticatedUserPrivileges;
+ if (privileges === undefined) {
+ return result;
+ }
- if (parms.reduce) {
- parms.$reduce = parms.reduce; // must have $ to pass to db
- delete parms.reduce;
+ privileges.forEach(privilege => {
+ let resource = privilege.resource;
+ if (resource === undefined) {
+ return;
}
-
- if (parms.keyf) {
- parms.$keyf = parms.keyf;
- delete parms.keyf;
+ let db = resource.db;
+ if (db === undefined || db !== this.getName()) {
+ return;
}
-
- return parms;
- };
-
- DB.prototype.resetError = function() {
- return this.runCommand({reseterror: 1});
- };
-
- DB.prototype.forceError = function() {
- return this.runCommand({forceerror: 1});
- };
-
- DB.prototype.getLastError = function(w, wtimeout) {
- var res = this.getLastErrorObj(w, wtimeout);
- if (!res.ok)
- throw _getErrorWithCode(ret, "getlasterror failed: " + tojson(res));
- return res.err;
- };
- DB.prototype.getLastErrorObj = function(w, wtimeout, j) {
- var cmd = {getlasterror: 1};
- if (w) {
- cmd.w = w;
- if (wtimeout)
- cmd.wtimeout = wtimeout;
- if (j != null)
- cmd.j = j;
+ let collection = resource.collection;
+ if (collection === undefined || typeof collection !== "string" || collection === "") {
+ return;
}
- var res = this.runCommand(cmd);
- if (!res.ok)
- throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
- return res;
- };
- DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;
-
- DB.prototype._getCollectionInfosCommand = function(
- filter, nameOnly = false, authorizedCollections = false, options = {}) {
- filter = filter || {};
- const cmd = {
- listCollections: 1,
- filter: filter,
- nameOnly: nameOnly,
- authorizedCollections: authorizedCollections
- };
-
- const res = this.runCommand(Object.merge(cmd, options));
- if (!res.ok) {
- throw _getErrorWithCode(res, "listCollections failed: " + tojson(res));
+ result.push({name: collection});
+ });
+
+ return result.sort(compareOn("name"));
+};
+
+/**
+ * Returns a list that contains the names and options of this database's collections, sorted
+ * by collection name. An optional filter can be specified to match only collections with
+ * certain metadata.
+ */
+DB.prototype.getCollectionInfos = function(
+ filter, nameOnly = false, authorizedCollections = false) {
+ try {
+ return this._getCollectionInfosCommand(filter, nameOnly, authorizedCollections);
+ } catch (ex) {
+ if (ex.code !== ErrorCodes.Unauthorized) {
+ // We cannot recover from this error, propagate it.
+ throw ex;
}
- return new DBCommandCursor(this, res).toArray().sort(compareOn("name"));
- };
+        // We may be able to compute a set of *some* collections that exist and that we have
+        // access to, based on our privileges. For this to work, the previous operation must
+        // have failed due to authorization, we must be attempting to recover the names of our
+        // own collections, and no filter can have been provided.
- DB.prototype._getCollectionInfosFromPrivileges = function() {
- let ret = this.runCommand({connectionStatus: 1, showPrivileges: 1});
- if (!ret.ok) {
- throw _getErrorWithCode(res,
- "Failed to acquire collection information from privileges");
+ if (nameOnly && authorizedCollections && Object.getOwnPropertyNames(filter).length === 0 &&
+ ex.code === ErrorCodes.Unauthorized) {
+ print(
+ "Warning: unable to run listCollections, attempting to approximate collection names by parsing connectionStatus");
+ return this._getCollectionInfosFromPrivileges();
}
- // Parse apart collection information.
- let result = [];
+ throw ex;
+ }
+};
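// [Editor's illustration -- not part of this patch] Sketches of the wrapper above:
db.getCollectionInfos({"options.capped": true});  // filter on collection metadata
db.getCollectionInfos({}, true, true);  // nameOnly + authorizedCollections, which also
                                        // enables the privilege-parsing fallback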
+
+DB.prototype._getCollectionNamesInternal = function(options) {
+ return this._getCollectionInfosCommand({}, true, true, options).map(function(infoObj) {
+ return infoObj.name;
+ });
+};
+
+/**
+ * Returns this database's list of collection names in sorted order.
+ */
+DB.prototype.getCollectionNames = function() {
+ return this._getCollectionNamesInternal({});
+};
+
+DB.prototype.tojson = function() {
+ return this._name;
+};
+
+DB.prototype.toString = function() {
+ return this._name;
+};
+
+DB.prototype.isMaster = function() {
+ return this.runCommand("isMaster");
+};
+
+var commandUnsupported = function(res) {
+ return (!res.ok &&
+ (res.errmsg.startsWith("no such cmd") || res.errmsg.startsWith("no such command") ||
+ res.code === 59 /* CommandNotFound */));
+};
+
+DB.prototype.currentOp = function(arg) {
+ var q = {};
+ if (arg) {
+ if (typeof (arg) == "object")
+ Object.extend(q, arg);
+ else if (arg)
+ q["$all"] = true;
+ }
- let privileges = ret.authInfo.authenticatedUserPrivileges;
- if (privileges === undefined) {
- return result;
+ var commandObj = {"currentOp": 1};
+ Object.extend(commandObj, q);
+ var res = this.adminCommand(commandObj);
+ if (commandUnsupported(res)) {
+ // always send legacy currentOp with default (null) read preference (SERVER-17951)
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
+ try {
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.inprog.findOne(q);
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
-
- privileges.forEach(privilege => {
- let resource = privilege.resource;
- if (resource === undefined) {
- return;
- }
- let db = resource.db;
- if (db === undefined || db !== this.getName()) {
- return;
- }
- let collection = resource.collection;
- if (collection === undefined || typeof collection !== "string" || collection === "") {
- return;
- }
-
- result.push({name: collection});
- });
-
- return result.sort(compareOn("name"));
- };
-
- /**
- * Returns a list that contains the names and options of this database's collections, sorted
- * by collection name. An optional filter can be specified to match only collections with
- * certain metadata.
- */
- DB.prototype.getCollectionInfos = function(
- filter, nameOnly = false, authorizedCollections = false) {
+ }
+ return res;
+};
+DB.prototype.currentOP = DB.prototype.currentOp;
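// [Editor's illustration -- not part of this patch] currentOp() accepts either a
// filter document matched against the reported operations or a truthy scalar, which
// is translated into {$all: true}:
db.currentOp({secs_running: {$gte: 5}});  // only ops running for 5 s or more
db.currentOp(true);                       // include idle connections and system ops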
+
+DB.prototype.killOp = function(op) {
+ if (!op)
+ throw Error("no opNum to kill specified");
+ var res = this.adminCommand({'killOp': 1, 'op': op});
+ if (commandUnsupported(res)) {
+ // fall back for old servers
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
try {
- return this._getCollectionInfosCommand(filter, nameOnly, authorizedCollections);
- } catch (ex) {
- if (ex.code !== ErrorCodes.Unauthorized) {
- // We cannot recover from this error, propagate it.
- throw ex;
- }
-
- // We may be able to compute a set of *some* collections which exist and we have access
- // to from our privileges. For this to work, the previous operation must have failed due
- // to authorization, we must be attempting to recover the names of our own collections,
- // and no filter can have been provided.
-
- if (nameOnly && authorizedCollections &&
- Object.getOwnPropertyNames(filter).length === 0 &&
- ex.code === ErrorCodes.Unauthorized) {
- print(
- "Warning: unable to run listCollections, attempting to approximate collection names by parsing connectionStatus");
- return this._getCollectionInfosFromPrivileges();
- }
-
- throw ex;
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.killop.findOne({'op': op});
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
- };
-
- DB.prototype._getCollectionNamesInternal = function(options) {
- return this._getCollectionInfosCommand({}, true, true, options).map(function(infoObj) {
- return infoObj.name;
- });
- };
-
- /**
- * Returns this database's list of collection names in sorted order.
- */
- DB.prototype.getCollectionNames = function() {
- return this._getCollectionNamesInternal({});
- };
+ }
+ return res;
+};
+DB.prototype.killOP = DB.prototype.killOp;
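// [Editor's illustration -- not part of this patch] Killing an operation found via
// currentOp(); opid values come from the "inprog" array (a sketch only -- blindly
// killing the first match is not something to do on a real deployment):
var ops = db.currentOp({secs_running: {$gte: 60}}).inprog;
if (ops.length > 0) db.killOp(ops[0].opid);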
+
+DB.tsToSeconds = function(x) {
+ if (x.t && x.i)
+ return x.t;
+ return x / 4294967296; // low 32 bits are ordinal #s within a second
+};
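// [Editor's illustration -- not part of this patch] Both input shapes accepted by
// the converter above:
DB.tsToSeconds({t: 1565545664, i: 3});  // Timestamp shape -> 1565545664
DB.tsToSeconds(4294967296);             // packed 64-bit form -> 1 (the seconds live
                                        // in the high 32 bits)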
+
+/**
+ Get a replication log information summary.
+ <p>
+  This command is for the database/cloud administrator and not applicable to most databases.
+ It is only used with the local database. One might invoke from the JS shell:
+ <pre>
+ use local
+ db.getReplicationInfo();
+ </pre>
+ * @return Object timeSpan: time span of the oplog from start to end; if a slave is more
+ *                          out of date than that, it cannot recover without a complete resync
+*/
+DB.prototype.getReplicationInfo = function() {
+ var localdb = this.getSiblingDB("local");
+
+ var result = {};
+ var oplog;
+ var localCollections = localdb.getCollectionNames();
+ if (localCollections.indexOf('oplog.rs') >= 0) {
+ oplog = 'oplog.rs';
+ } else {
+ result.errmsg = "replication not detected";
+ return result;
+ }
- DB.prototype.tojson = function() {
- return this._name;
- };
+ var ol = localdb.getCollection(oplog);
+ var ol_stats = ol.stats();
+ if (ol_stats && ol_stats.maxSize) {
+ result.logSizeMB = ol_stats.maxSize / (1024 * 1024);
+ } else {
+ result.errmsg = "Could not get stats for local." + oplog + " collection. " +
+ "collstats returned: " + tojson(ol_stats);
+ return result;
+ }
- DB.prototype.toString = function() {
- return this._name;
- };
+ result.usedMB = ol_stats.size / (1024 * 1024);
+ result.usedMB = Math.ceil(result.usedMB * 100) / 100;
- DB.prototype.isMaster = function() {
- return this.runCommand("isMaster");
- };
+ var firstc = ol.find().sort({$natural: 1}).limit(1);
+ var lastc = ol.find().sort({$natural: -1}).limit(1);
+ if (!firstc.hasNext() || !lastc.hasNext()) {
+        result.errmsg =
+            "objects not found in local." + oplog + " -- is this a new and empty db instance?";
+ result.oplogMainRowCount = ol.count();
+ return result;
+ }
- var commandUnsupported = function(res) {
- return (!res.ok &&
- (res.errmsg.startsWith("no such cmd") || res.errmsg.startsWith("no such command") ||
- res.code === 59 /* CommandNotFound */));
- };
+ var first = firstc.next();
+ var last = lastc.next();
+ var tfirst = first.ts;
+ var tlast = last.ts;
+
+ if (tfirst && tlast) {
+ tfirst = DB.tsToSeconds(tfirst);
+ tlast = DB.tsToSeconds(tlast);
+ result.timeDiff = tlast - tfirst;
+ result.timeDiffHours = Math.round(result.timeDiff / 36) / 100;
+ result.tFirst = (new Date(tfirst * 1000)).toString();
+ result.tLast = (new Date(tlast * 1000)).toString();
+ result.now = Date();
+ } else {
+ result.errmsg = "ts element not found in oplog objects";
+ }
- DB.prototype.currentOp = function(arg) {
- var q = {};
- if (arg) {
- if (typeof(arg) == "object")
- Object.extend(q, arg);
- else if (arg)
- q["$all"] = true;
- }
+ return result;
+};
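// [Editor's illustration -- not part of this patch] Reading the summary fields on a
// replica-set member:
var info = db.getReplicationInfo();
if (info.errmsg) {
    print(info.errmsg);
} else {
    print("oplog holds " + info.timeDiffHours + " hrs in " + info.logSizeMB + " MB");
}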
- var commandObj = {"currentOp": 1};
- Object.extend(commandObj, q);
- var res = this.adminCommand(commandObj);
- if (commandUnsupported(res)) {
- // always send legacy currentOp with default (null) read preference (SERVER-17951)
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.inprog.findOne(q);
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
- }
- return res;
- };
- DB.prototype.currentOP = DB.prototype.currentOp;
-
- DB.prototype.killOp = function(op) {
- if (!op)
- throw Error("no opNum to kill specified");
- var res = this.adminCommand({'killOp': 1, 'op': op});
- if (commandUnsupported(res)) {
- // fall back for old servers
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.killop.findOne({'op': op});
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
+DB.prototype.printReplicationInfo = function() {
+ var result = this.getReplicationInfo();
+ if (result.errmsg) {
+ var isMaster = this.isMaster();
+ if (isMaster.arbiterOnly) {
+ print("cannot provide replication status from an arbiter.");
+ return;
+ } else if (!isMaster.ismaster) {
+ print("this is a slave, printing slave replication info.");
+ this.printSlaveReplicationInfo();
+ return;
}
- return res;
- };
- DB.prototype.killOP = DB.prototype.killOp;
-
- DB.tsToSeconds = function(x) {
- if (x.t && x.i)
- return x.t;
- return x / 4294967296; // low 32 bits are ordinal #s within a second
- };
-
- /**
- Get a replication log information summary.
- <p>
- This command is for the database/cloud administer and not applicable to most databases.
- It is only used with the local database. One might invoke from the JS shell:
- <pre>
- use local
- db.getReplicationInfo();
- </pre>
- * @return Object timeSpan: time span of the oplog from start to end if slave is more out
- * of date than that, it can't recover without a complete resync
- */
- DB.prototype.getReplicationInfo = function() {
- var localdb = this.getSiblingDB("local");
-
- var result = {};
- var oplog;
- var localCollections = localdb.getCollectionNames();
- if (localCollections.indexOf('oplog.rs') >= 0) {
- oplog = 'oplog.rs';
+ print(tojson(result));
+ return;
+ }
+ print("configured oplog size: " + result.logSizeMB + "MB");
+ print("log length start to end: " + result.timeDiff + "secs (" + result.timeDiffHours + "hrs)");
+ print("oplog first event time: " + result.tFirst);
+ print("oplog last event time: " + result.tLast);
+ print("now: " + result.now);
+};
+
+DB.prototype.printSlaveReplicationInfo = function() {
+ var startOptimeDate = null;
+ var primary = null;
+
+ function getReplLag(st) {
+ assert(startOptimeDate, "how could this be null (getReplLag startOptimeDate)");
+ print("\tsyncedTo: " + st.toString());
+ var ago = (startOptimeDate - st) / 1000;
+ var hrs = Math.round(ago / 36) / 100;
+ var suffix = "";
+ if (primary) {
+            suffix = "primary";
} else {
- result.errmsg = "replication not detected";
- return result;
+ suffix = "freshest member (no primary available at the moment)";
}
+ print("\t" + Math.round(ago) + " secs (" + hrs + " hrs) behind the " + suffix);
+ }
- var ol = localdb.getCollection(oplog);
- var ol_stats = ol.stats();
- if (ol_stats && ol_stats.maxSize) {
- result.logSizeMB = ol_stats.maxSize / (1024 * 1024);
- } else {
- result.errmsg = "Could not get stats for local." + oplog + " collection. " +
- "collstats returned: " + tojson(ol_stats);
- return result;
+ function getMaster(members) {
+        for (var i in members) {
+ var row = members[i];
+ if (row.state === 1) {
+ return row;
+ }
}
- result.usedMB = ol_stats.size / (1024 * 1024);
- result.usedMB = Math.ceil(result.usedMB * 100) / 100;
-
- var firstc = ol.find().sort({$natural: 1}).limit(1);
- var lastc = ol.find().sort({$natural: -1}).limit(1);
- if (!firstc.hasNext() || !lastc.hasNext()) {
- result.errmsg =
- "objects not found in local.oplog.$main -- is this a new and empty db instance?";
- result.oplogMainRowCount = ol.count();
- return result;
- }
+ return null;
+ }
- var first = firstc.next();
- var last = lastc.next();
- var tfirst = first.ts;
- var tlast = last.ts;
-
- if (tfirst && tlast) {
- tfirst = DB.tsToSeconds(tfirst);
- tlast = DB.tsToSeconds(tlast);
- result.timeDiff = tlast - tfirst;
- result.timeDiffHours = Math.round(result.timeDiff / 36) / 100;
- result.tFirst = (new Date(tfirst * 1000)).toString();
- result.tLast = (new Date(tlast * 1000)).toString();
- result.now = Date();
+ function g(x) {
+ assert(x, "how could this be null (printSlaveReplicationInfo gx)");
+ print("source: " + x.host);
+ if (x.syncedTo) {
+ var st = new Date(DB.tsToSeconds(x.syncedTo) * 1000);
+ getReplLag(st);
} else {
- result.errmsg = "ts element not found in oplog objects";
+ print("\tdoing initial sync");
}
+ }
- return result;
- };
-
- DB.prototype.printReplicationInfo = function() {
- var result = this.getReplicationInfo();
- if (result.errmsg) {
- var isMaster = this.isMaster();
- if (isMaster.arbiterOnly) {
- print("cannot provide replication status from an arbiter.");
- return;
- } else if (!isMaster.ismaster) {
- print("this is a slave, printing slave replication info.");
- this.printSlaveReplicationInfo();
- return;
- }
- print(tojson(result));
+ function r(x) {
+ assert(x, "how could this be null (printSlaveReplicationInfo rx)");
+ if (x.state == 1 || x.state == 7) { // ignore primaries (1) and arbiters (7)
return;
}
- print("configured oplog size: " + result.logSizeMB + "MB");
- print("log length start to end: " + result.timeDiff + "secs (" + result.timeDiffHours +
- "hrs)");
- print("oplog first event time: " + result.tFirst);
- print("oplog last event time: " + result.tLast);
- print("now: " + result.now);
- };
- DB.prototype.printSlaveReplicationInfo = function() {
- var startOptimeDate = null;
- var primary = null;
-
- function getReplLag(st) {
- assert(startOptimeDate, "how could this be null (getReplLag startOptimeDate)");
- print("\tsyncedTo: " + st.toString());
- var ago = (startOptimeDate - st) / 1000;
- var hrs = Math.round(ago / 36) / 100;
- var suffix = "";
- if (primary) {
- suffix = "primary ";
- } else {
- suffix = "freshest member (no primary available at the moment)";
- }
- print("\t" + Math.round(ago) + " secs (" + hrs + " hrs) behind the " + suffix);
+ print("source: " + x.name);
+ if (x.optime) {
+ getReplLag(x.optimeDate);
+ } else {
+ print("\tno replication info, yet. State: " + x.stateStr);
}
+ }
- function getMaster(members) {
- for (i in members) {
- var row = members[i];
- if (row.state === 1) {
- return row;
- }
- }
+ var L = this.getSiblingDB("local");
- return null;
+ if (L.system.replset.count() != 0) {
+ var status = this.adminCommand({'replSetGetStatus': 1});
+ primary = getMaster(status.members);
+ if (primary) {
+ startOptimeDate = primary.optimeDate;
}
-
- function g(x) {
- assert(x, "how could this be null (printSlaveReplicationInfo gx)");
- print("source: " + x.host);
- if (x.syncedTo) {
- var st = new Date(DB.tsToSeconds(x.syncedTo) * 1000);
- getReplLag(st);
- } else {
- print("\tdoing initial sync");
+ // no primary, find the most recent op among all members
+ else {
+ startOptimeDate = new Date(0, 0);
+            for (var i in status.members) {
+ if (status.members[i].optimeDate > startOptimeDate) {
+ startOptimeDate = status.members[i].optimeDate;
+ }
}
}
- function r(x) {
- assert(x, "how could this be null (printSlaveReplicationInfo rx)");
- if (x.state == 1 || x.state == 7) { // ignore primaries (1) and arbiters (7)
- return;
- }
-
- print("source: " + x.name);
- if (x.optime) {
- getReplLag(x.optimeDate);
- } else {
- print("\tno replication info, yet. State: " + x.stateStr);
- }
+        for (var i in status.members) {
+ r(status.members[i]);
}
-
- var L = this.getSiblingDB("local");
-
- if (L.system.replset.count() != 0) {
- var status = this.adminCommand({'replSetGetStatus': 1});
- primary = getMaster(status.members);
- if (primary) {
- startOptimeDate = primary.optimeDate;
- }
- // no primary, find the most recent op among all members
- else {
- startOptimeDate = new Date(0, 0);
- for (i in status.members) {
- if (status.members[i].optimeDate > startOptimeDate) {
- startOptimeDate = status.members[i].optimeDate;
- }
- }
- }
-
- for (i in status.members) {
- r(status.members[i]);
+ }
+};
+
+DB.prototype.serverBuildInfo = function() {
+ return this._adminCommand("buildinfo");
+};
+
+// Used to trim entries from the metrics.commands that have never been executed
+getActiveCommands = function(tree) {
+ var result = {};
+ for (var i in tree) {
+ if (!tree.hasOwnProperty(i))
+ continue;
+ if (tree[i].hasOwnProperty("total")) {
+ if (tree[i].total > 0) {
+ result[i] = tree[i];
}
+ continue;
}
- };
-
- DB.prototype.serverBuildInfo = function() {
- return this._adminCommand("buildinfo");
- };
-
- // Used to trim entries from the metrics.commands that have never been executed
- getActiveCommands = function(tree) {
- var result = {};
- for (var i in tree) {
- if (!tree.hasOwnProperty(i))
- continue;
- if (tree[i].hasOwnProperty("total")) {
- if (tree[i].total > 0) {
- result[i] = tree[i];
- }
- continue;
- }
- if (i == "<UNKNOWN>") {
- if (tree[i] > 0) {
- result[i] = tree[i];
- }
- continue;
- }
- // Handles nested commands
- var subStatus = getActiveCommands(tree[i]);
- if (Object.keys(subStatus).length > 0) {
+ if (i == "<UNKNOWN>") {
+ if (tree[i] > 0) {
result[i] = tree[i];
}
+ continue;
}
- return result;
- };
-
- DB.prototype.serverStatus = function(options) {
- var cmd = {serverStatus: 1};
- if (options) {
- Object.extend(cmd, options);
+ // Handles nested commands
+ var subStatus = getActiveCommands(tree[i]);
+ if (Object.keys(subStatus).length > 0) {
+ result[i] = tree[i];
}
- var res = this._adminCommand(cmd);
- // Only prune if we have a metrics tree with commands.
- if (res.metrics && res.metrics.commands) {
- res.metrics.commands = getActiveCommands(res.metrics.commands);
- }
- return res;
- };
+ }
+ return result;
+};
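// [Editor's illustration -- not part of this patch] How the pruning behaves on a toy
// metrics tree: entries survive only with a nonzero "total" (or a nested entry that
// has one):
var pruned = getActiveCommands({
    find: {total: 12, failed: 0},  // kept
    ping: {total: 0, failed: 0},   // dropped
    "<UNKNOWN>": 0                 // dropped
});
// pruned -> {find: {total: 12, failed: 0}}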
- DB.prototype.hostInfo = function() {
- return this._adminCommand("hostInfo");
- };
+DB.prototype.serverStatus = function(options) {
+ var cmd = {serverStatus: 1};
+ if (options) {
+ Object.extend(cmd, options);
+ }
+ var res = this._adminCommand(cmd);
+ // Only prune if we have a metrics tree with commands.
+ if (res.metrics && res.metrics.commands) {
+ res.metrics.commands = getActiveCommands(res.metrics.commands);
+ }
+ return res;
+};
- DB.prototype.serverCmdLineOpts = function() {
- return this._adminCommand("getCmdLineOpts");
- };
+DB.prototype.hostInfo = function() {
+ return this._adminCommand("hostInfo");
+};
- DB.prototype.version = function() {
- return this.serverBuildInfo().version;
- };
+DB.prototype.serverCmdLineOpts = function() {
+ return this._adminCommand("getCmdLineOpts");
+};
- DB.prototype.serverBits = function() {
- return this.serverBuildInfo().bits;
- };
+DB.prototype.version = function() {
+ return this.serverBuildInfo().version;
+};
- DB.prototype.listCommands = function() {
- var x = this.runCommand("listCommands");
- for (var name in x.commands) {
- var c = x.commands[name];
+DB.prototype.serverBits = function() {
+ return this.serverBuildInfo().bits;
+};
- var s = name + ": ";
+DB.prototype.listCommands = function() {
+ var x = this.runCommand("listCommands");
+ for (var name in x.commands) {
+ var c = x.commands[name];
- if (c.adminOnly)
- s += " adminOnly ";
- if (c.slaveOk)
- s += " slaveOk ";
+ var s = name + ": ";
- s += "\n ";
- s += c.help.replace(/\n/g, '\n ');
- s += "\n";
+ if (c.adminOnly)
+ s += " adminOnly ";
+ if (c.slaveOk)
+ s += " slaveOk ";
- print(s);
- }
- };
+ s += "\n ";
+ s += c.help.replace(/\n/g, '\n ');
+ s += "\n";
- DB.prototype.printShardingStatus = function(verbose) {
- printShardingStatus(this.getSiblingDB("config"), verbose);
- };
+ print(s);
+ }
+};
- DB.prototype.fsyncLock = function() {
- return this.adminCommand({fsync: 1, lock: true});
- };
+DB.prototype.printShardingStatus = function(verbose) {
+ printShardingStatus(this.getSiblingDB("config"), verbose);
+};
- DB.prototype.fsyncUnlock = function() {
- var res = this.adminCommand({fsyncUnlock: 1});
- if (commandUnsupported(res)) {
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.unlock.findOne();
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
- }
- return res;
- };
+DB.prototype.fsyncLock = function() {
+ return this.adminCommand({fsync: 1, lock: true});
+};
- DB.autocomplete = function(obj) {
- // Time out if a transaction or other op holds locks we need. Caller suppresses exceptions.
- var colls = obj._getCollectionNamesInternal({maxTimeMS: 1000});
- var ret = [];
- for (var i = 0; i < colls.length; i++) {
- if (colls[i].match(/^[a-zA-Z0-9_.\$]+$/))
- ret.push(colls[i]);
+DB.prototype.fsyncUnlock = function() {
+ var res = this.adminCommand({fsyncUnlock: 1});
+ if (commandUnsupported(res)) {
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
+ try {
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.unlock.findOne();
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
- return ret;
- };
-
- DB.prototype.setSlaveOk = function(value) {
- if (value == undefined)
- value = true;
- this._slaveOk = value;
- };
-
- DB.prototype.getSlaveOk = function() {
- if (this._slaveOk != undefined)
- return this._slaveOk;
- return this._mongo.getSlaveOk();
- };
-
- DB.prototype.getQueryOptions = function() {
- var options = 0;
- if (this.getSlaveOk())
- options |= 4;
- return options;
- };
-
- /* Loads any scripts contained in system.js into the client shell.
- */
- DB.prototype.loadServerScripts = function() {
- var global = Function('return this')();
- this.system.js.find().forEach(function(u) {
- if (u.value.constructor === Code) {
- global[u._id] = eval("(" + u.value.code + ")");
- } else {
- global[u._id] = u.value;
- }
- });
- };
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////
- //////////////////////////// Security shell helpers below
- /////////////////////////////////////////////
- ////////////////////////////////////////////////////////////////////////////////////////////////////
-
- function getUserObjString(userObj) {
- var pwd = userObj.pwd;
- delete userObj.pwd;
- var toreturn = tojson(userObj);
- userObj.pwd = pwd;
- return toreturn;
}
-
- DB.prototype._modifyCommandToDigestPasswordIfNecessary = function(cmdObj, username) {
- if (!cmdObj["pwd"]) {
- return;
- }
- if (cmdObj.hasOwnProperty("digestPassword")) {
- throw Error(
- "Cannot specify 'digestPassword' through the user management shell helpers, " +
- "use 'passwordDigestor' instead");
- }
- var passwordDigestor = cmdObj["passwordDigestor"] ? cmdObj["passwordDigestor"] : "server";
- if (passwordDigestor == "server") {
- cmdObj["digestPassword"] = true;
- } else if (passwordDigestor == "client") {
- cmdObj["pwd"] = _hashPassword(username, cmdObj["pwd"]);
- cmdObj["digestPassword"] = false;
+ return res;
+};
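// [Editor's illustration -- not part of this patch] The lock/backup/unlock cycle
// these two helpers support:
db.fsyncLock();    // flush to disk and block writes
// ... take the filesystem snapshot / copy the data files here ...
db.fsyncUnlock();  // resume writes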
+
+DB.autocomplete = function(obj) {
+ // Time out if a transaction or other op holds locks we need. Caller suppresses exceptions.
+ var colls = obj._getCollectionNamesInternal({maxTimeMS: 1000});
+ var ret = [];
+ for (var i = 0; i < colls.length; i++) {
+ if (colls[i].match(/^[a-zA-Z0-9_.\$]+$/))
+ ret.push(colls[i]);
+ }
+ return ret;
+};
+
+DB.prototype.setSlaveOk = function(value) {
+ if (value == undefined)
+ value = true;
+ this._slaveOk = value;
+};
+
+DB.prototype.getSlaveOk = function() {
+ if (this._slaveOk != undefined)
+ return this._slaveOk;
+ return this._mongo.getSlaveOk();
+};
+
+DB.prototype.getQueryOptions = function() {
+ var options = 0;
+ if (this.getSlaveOk())
+ options |= 4;
+ return options;
+};
+
+/* Loads any scripts contained in system.js into the client shell.
+ */
+DB.prototype.loadServerScripts = function() {
+ var global = Function('return this')();
+ this.system.js.find().forEach(function(u) {
+ if (u.value.constructor === Code) {
+ global[u._id] = eval("(" + u.value.code + ")");
} else {
- throw Error("'passwordDigestor' must be either 'server' or 'client', got: '" +
- passwordDigestor + "'");
- }
- delete cmdObj["passwordDigestor"];
- };
+ global[u._id] = u.value;
+ }
+ });
+};
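// [Editor's illustration -- not part of this patch] Storing a helper in system.js
// and pulling it into the shell's globals ("addOne" is a hypothetical script):
db.system.js.save({_id: "addOne", value: function(x) { return x + 1; }});
db.loadServerScripts();
print(addOne(41));  // 42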
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////// Security shell helpers below //////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+function getUserObjString(userObj) {
+ var pwd = userObj.pwd;
+ delete userObj.pwd;
+ var toreturn = tojson(userObj);
+ userObj.pwd = pwd;
+ return toreturn;
+}
+
+DB.prototype._modifyCommandToDigestPasswordIfNecessary = function(cmdObj, username) {
+ if (!cmdObj["pwd"]) {
+ return;
+ }
+ if (cmdObj.hasOwnProperty("digestPassword")) {
+ throw Error("Cannot specify 'digestPassword' through the user management shell helpers, " +
+ "use 'passwordDigestor' instead");
+ }
+ var passwordDigestor = cmdObj["passwordDigestor"] ? cmdObj["passwordDigestor"] : "server";
+ if (passwordDigestor == "server") {
+ cmdObj["digestPassword"] = true;
+ } else if (passwordDigestor == "client") {
+ cmdObj["pwd"] = _hashPassword(username, cmdObj["pwd"]);
+ cmdObj["digestPassword"] = false;
+ } else {
+ throw Error("'passwordDigestor' must be either 'server' or 'client', got: '" +
+ passwordDigestor + "'");
+ }
+ delete cmdObj["passwordDigestor"];
+};
- DB.prototype.createUser = function(userObj, writeConcern) {
- var name = userObj["user"];
- if (name === undefined) {
- throw Error("no 'user' field provided to 'createUser' function");
- }
+DB.prototype.createUser = function(userObj, writeConcern) {
+ var name = userObj["user"];
+ if (name === undefined) {
+ throw Error("no 'user' field provided to 'createUser' function");
+ }
- if (userObj["createUser"] !== undefined) {
- throw Error("calling 'createUser' function with 'createUser' field is disallowed");
- }
+ if (userObj["createUser"] !== undefined) {
+ throw Error("calling 'createUser' function with 'createUser' field is disallowed");
+ }
- var cmdObj = {createUser: name};
- cmdObj = Object.extend(cmdObj, userObj);
- delete cmdObj["user"];
+ var cmdObj = {createUser: name};
+ cmdObj = Object.extend(cmdObj, userObj);
+ delete cmdObj["user"];
- this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
+ this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
- cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
+ cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
- var res = this.runCommand(cmdObj);
+ var res = this.runCommand(cmdObj);
- if (res.ok) {
- print("Successfully added user: " + getUserObjString(userObj));
- return;
- }
+ if (res.ok) {
+ print("Successfully added user: " + getUserObjString(userObj));
+ return;
+ }
- if (res.errmsg == "no such cmd: createUser") {
- throw Error("'createUser' command not found. This is most likely because you are " +
- "talking to an old (pre v2.6) MongoDB server");
- }
+ if (res.errmsg == "no such cmd: createUser") {
+ throw Error("'createUser' command not found. This is most likely because you are " +
+ "talking to an old (pre v2.6) MongoDB server");
+ }
- if (res.errmsg == "timeout") {
- throw Error("timed out while waiting for user authentication to replicate - " +
- "database will not be fully secured until replication finishes");
- }
+ if (res.errmsg == "timeout") {
+ throw Error("timed out while waiting for user authentication to replicate - " +
+ "database will not be fully secured until replication finishes");
+ }
- throw _getErrorWithCode(res, "couldn't add user: " + res.errmsg);
- };
+ throw _getErrorWithCode(res, "couldn't add user: " + res.errmsg);
+};
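// [Editor's illustration -- not part of this patch] Creating a user with client-side
// password digesting ("alice" is hypothetical); the helper above rewrites pwd to
// _hashPassword("alice", pwd) and sets digestPassword: false before the command runs:
db.createUser({
    user: "alice",
    pwd: "correct horse battery staple",
    roles: ["readWrite"],
    passwordDigestor: "client"
});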
- function _hashPassword(username, password) {
- if (typeof password != 'string') {
- throw Error("User passwords must be of type string. Was given password with type: " +
- typeof(password));
- }
- return hex_md5(username + ":mongo:" + password);
+function _hashPassword(username, password) {
+ if (typeof password != 'string') {
+ throw Error("User passwords must be of type string. Was given password with type: " +
+ typeof (password));
+ }
+ return hex_md5(username + ":mongo:" + password);
+}
+
+/**
+ * Used for updating users in systems with V1 style user information
+ * (i.e., MongoDB v2.4 and prior)
+ */
+DB.prototype._updateUserV1 = function(name, updateObject, writeConcern) {
+ var setObj = {};
+ if (updateObject.pwd) {
+ setObj["pwd"] = _hashPassword(name, updateObject.pwd);
+ }
+ if (updateObject.extraData) {
+ setObj["extraData"] = updateObject.extraData;
+ }
+ if (updateObject.roles) {
+ setObj["roles"] = updateObject.roles;
}
- /**
- * Used for updating users in systems with V1 style user information
- * (ie MongoDB v2.4 and prior)
- */
- DB.prototype._updateUserV1 = function(name, updateObject, writeConcern) {
- var setObj = {};
- if (updateObject.pwd) {
- setObj["pwd"] = _hashPassword(name, updateObject.pwd);
- }
- if (updateObject.extraData) {
- setObj["extraData"] = updateObject.extraData;
- }
- if (updateObject.roles) {
- setObj["roles"] = updateObject.roles;
- }
+ this.system.users.update({user: name, userSource: null}, {$set: setObj});
+ var errObj = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
+ if (errObj.err) {
+ throw _getErrorWithCode(errObj, "Updating user failed: " + errObj.err);
+ }
+};
- this.system.users.update({user: name, userSource: null}, {$set: setObj});
- var errObj = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
- if (errObj.err) {
- throw _getErrorWithCode(errObj, "Updating user failed: " + errObj.err);
- }
- };
+DB.prototype.updateUser = function(name, updateObject, writeConcern) {
+ var cmdObj = {updateUser: name};
+ cmdObj = Object.extend(cmdObj, updateObject);
+ cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
+ this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
- DB.prototype.updateUser = function(name, updateObject, writeConcern) {
- var cmdObj = {updateUser: name};
- cmdObj = Object.extend(cmdObj, updateObject);
- cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
- this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
+ var res = this.runCommand(cmdObj);
+ if (res.ok) {
+ return;
+ }
- var res = this.runCommand(cmdObj);
- if (res.ok) {
- return;
- }
+ if (res.errmsg == "no such cmd: updateUser") {
+ this._updateUserV1(name, updateObject, cmdObj['writeConcern']);
+ return;
+ }
- if (res.errmsg == "no such cmd: updateUser") {
- this._updateUserV1(name, updateObject, cmdObj['writeConcern']);
- return;
- }
+ throw _getErrorWithCode(res, "Updating user failed: " + res.errmsg);
+};
- throw _getErrorWithCode(res, "Updating user failed: " + res.errmsg);
- };
+DB.prototype.changeUserPassword = function(username, password, writeConcern) {
+ this.updateUser(username, {pwd: password}, writeConcern);
+};
- DB.prototype.changeUserPassword = function(username, password, writeConcern) {
- this.updateUser(username, {pwd: password}, writeConcern);
- };
+DB.prototype.logout = function() {
+ // Logging out doesn't require a session since it manipulates connection state.
+ return this.getMongo().logout(this.getName());
+};
- DB.prototype.logout = function() {
- // Logging out doesn't require a session since it manipulates connection state.
- return this.getMongo().logout(this.getName());
- };
+// For backwards compatibility
+DB.prototype.removeUser = function(username, writeConcern) {
+ print("WARNING: db.removeUser has been deprecated, please use db.dropUser instead");
+ return this.dropUser(username, writeConcern);
+};
- // For backwards compatibility
- DB.prototype.removeUser = function(username, writeConcern) {
- print("WARNING: db.removeUser has been deprecated, please use db.dropUser instead");
- return this.dropUser(username, writeConcern);
+DB.prototype.dropUser = function(username, writeConcern) {
+ var cmdObj = {
+ dropUser: username,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
- DB.prototype.dropUser = function(username, writeConcern) {
- var cmdObj = {
- dropUser: username,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
-
- if (res.ok) {
- return true;
- }
+ if (res.ok) {
+ return true;
+ }
- if (res.code == 11) { // Code 11 = UserNotFound
- return false;
- }
+ if (res.code == 11) { // Code 11 = UserNotFound
+ return false;
+ }
- if (res.errmsg == "no such cmd: dropUsers") {
- return this._removeUserV1(username, cmdObj['writeConcern']);
- }
+ if (res.errmsg == "no such cmd: dropUsers") {
+ return this._removeUserV1(username, cmdObj['writeConcern']);
+ }
- throw _getErrorWithCode(res, res.errmsg);
- };
+ throw _getErrorWithCode(res, res.errmsg);
+};
- /**
- * Used for removing users in systems with V1 style user information
- * (ie MongoDB v2.4 and prior)
- */
- DB.prototype._removeUserV1 = function(username, writeConcern) {
- this.getCollection("system.users").remove({user: username});
+/**
+ * Used for removing users in systems with V1 style user information
+ * (i.e., MongoDB v2.4 and prior)
+ */
+DB.prototype._removeUserV1 = function(username, writeConcern) {
+ this.getCollection("system.users").remove({user: username});
- var le = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
+ var le = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
- if (le.err) {
- throw _getErrorWithCode(le, "Couldn't remove user: " + le.err);
- }
+ if (le.err) {
+ throw _getErrorWithCode(le, "Couldn't remove user: " + le.err);
+ }
- if (le.n == 1) {
- return true;
- } else {
- return false;
- }
- };
+ if (le.n == 1) {
+ return true;
+ } else {
+ return false;
+ }
+};
- DB.prototype.dropAllUsers = function(writeConcern) {
- var res = this.runCommand({
- dropAllUsersFromDatabase: 1,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- });
+DB.prototype.dropAllUsers = function(writeConcern) {
+ var res = this.runCommand({
+ dropAllUsersFromDatabase: 1,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
+ });
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- return res.n;
- };
+ return res.n;
+};
- DB.prototype.__pwHash = function(nonce, username, pass) {
- return hex_md5(nonce + username + _hashPassword(username, pass));
- };
+DB.prototype.__pwHash = function(nonce, username, pass) {
+ return hex_md5(nonce + username + _hashPassword(username, pass));
+};
- DB.prototype._defaultAuthenticationMechanism = null;
+DB.prototype._defaultAuthenticationMechanism = null;
- DB.prototype._getDefaultAuthenticationMechanism = function(username, database) {
- if (username !== undefined) {
- const userid = database + "." + username;
- const result = this.runCommand({isMaster: 1, saslSupportedMechs: userid});
- if (result.ok && (result.saslSupportedMechs !== undefined)) {
- const mechs = result.saslSupportedMechs;
- if (!Array.isArray(mechs)) {
- throw Error("Server replied with invalid saslSupportedMechs response");
- }
+DB.prototype._getDefaultAuthenticationMechanism = function(username, database) {
+ if (username !== undefined) {
+ const userid = database + "." + username;
+ const result = this.runCommand({isMaster: 1, saslSupportedMechs: userid});
+ if (result.ok && (result.saslSupportedMechs !== undefined)) {
+ const mechs = result.saslSupportedMechs;
+ if (!Array.isArray(mechs)) {
+ throw Error("Server replied with invalid saslSupportedMechs response");
+ }
- if ((this._defaultAuthenticationMechanism != null) &&
- mechs.includes(this._defaultAuthenticationMechanism)) {
- return this._defaultAuthenticationMechanism;
- }
+ if ((this._defaultAuthenticationMechanism != null) &&
+ mechs.includes(this._defaultAuthenticationMechanism)) {
+ return this._defaultAuthenticationMechanism;
+ }
- // Never include PLAIN in auto-negotiation.
- const priority = ["GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-1"];
- for (var i = 0; i < priority.length; ++i) {
- if (mechs.includes(priority[i])) {
- return priority[i];
- }
+ // Never include PLAIN in auto-negotiation.
+ const priority = ["GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-1"];
+ for (var i = 0; i < priority.length; ++i) {
+ if (mechs.includes(priority[i])) {
+ return priority[i];
}
}
- // If isMaster doesn't support saslSupportedMechs,
- // or if we couldn't agree on a mechanism,
- // then fallthrough to configured default or SCRAM-SHA-1.
- }
-
- // Use the default auth mechanism if set on the command line.
- if (this._defaultAuthenticationMechanism != null)
- return this._defaultAuthenticationMechanism;
-
- return "SCRAM-SHA-1";
- };
-
- DB.prototype._defaultGssapiServiceName = null;
-
- DB.prototype._authOrThrow = function() {
- var params;
- if (arguments.length == 2) {
- params = {user: arguments[0], pwd: arguments[1]};
- } else if (arguments.length == 1) {
- if (typeof(arguments[0]) != "object")
- throw Error("Single-argument form of auth expects a parameter object");
- params = Object.extend({}, arguments[0]);
- } else {
- throw Error(
- "auth expects either (username, password) or ({ user: username, pwd: password })");
- }
-
- if (params.mechanism === undefined) {
- params.mechanism = this._getDefaultAuthenticationMechanism(params.user, this.getName());
- }
-
- if (params.db !== undefined) {
- throw Error("Do not override db field on db.auth(). Use getMongo().auth(), instead.");
}
+ // If isMaster doesn't support saslSupportedMechs,
+ // or if we couldn't agree on a mechanism,
+ // then fallthrough to configured default or SCRAM-SHA-1.
+ }
- if (params.mechanism == "GSSAPI" && params.serviceName == null &&
- this._defaultGssapiServiceName != null) {
- params.serviceName = this._defaultGssapiServiceName;
- }
-
- // Logging in doesn't require a session since it manipulates connection state.
- params.db = this.getName();
- var good = this.getMongo().auth(params);
- if (good) {
- // auth enabled, and should try to use isMaster and replSetGetStatus to build prompt
- this.getMongo().authStatus = {
- authRequired: true,
- isMaster: true,
- replSetGetStatus: true
- };
- }
-
- return good;
- };
-
- DB.prototype.auth = function() {
- var ex;
- try {
- this._authOrThrow.apply(this, arguments);
- } catch (ex) {
- print(ex);
- return 0;
- }
- return 1;
- };
-
- DB.prototype.grantRolesToUser = function(username, roles, writeConcern) {
- var cmdObj = {
- grantRolesToUser: username,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ // Use the default auth mechanism if set on the command line.
+ if (this._defaultAuthenticationMechanism != null)
+ return this._defaultAuthenticationMechanism;
+
+ return "SCRAM-SHA-1";
+};
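// [Editor's illustration -- not part of this patch] The negotiation above rides on
// isMaster's saslSupportedMechs; the raw exchange looks roughly like ("admin.alice"
// is a hypothetical user id):
var r = db.runCommand({isMaster: 1, saslSupportedMechs: "admin.alice"});
// r.saslSupportedMechs might be ["SCRAM-SHA-1", "SCRAM-SHA-256"]; GSSAPI wins when
// offered, then SCRAM-SHA-256, then SCRAM-SHA-1, and PLAIN is never auto-selected.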
+
+DB.prototype._defaultGssapiServiceName = null;
+
+DB.prototype._authOrThrow = function() {
+ var params;
+ if (arguments.length == 2) {
+ params = {user: arguments[0], pwd: arguments[1]};
+ } else if (arguments.length == 1) {
+ if (typeof (arguments[0]) != "object")
+ throw Error("Single-argument form of auth expects a parameter object");
+ params = Object.extend({}, arguments[0]);
+ } else {
+ throw Error(
+ "auth expects either (username, password) or ({ user: username, pwd: password })");
+ }
- DB.prototype.revokeRolesFromUser = function(username, roles, writeConcern) {
- var cmdObj = {
- revokeRolesFromUser: username,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ if (params.mechanism === undefined) {
+ params.mechanism = this._getDefaultAuthenticationMechanism(params.user, this.getName());
+ }
- DB.prototype.getUser = function(username, args) {
- if (typeof username != "string") {
- throw Error("User name for getUser shell helper must be a string");
- }
- var cmdObj = {usersInfo: username};
- Object.extend(cmdObj, args);
+ if (params.db !== undefined) {
+ throw Error("Do not override db field on db.auth(). Use getMongo().auth(), instead.");
+ }
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (params.mechanism == "GSSAPI" && params.serviceName == null &&
+ this._defaultGssapiServiceName != null) {
+ params.serviceName = this._defaultGssapiServiceName;
+ }
- if (res.users.length == 0) {
- return null;
- }
- return res.users[0];
- };
+ // Logging in doesn't require a session since it manipulates connection state.
+ params.db = this.getName();
+ var good = this.getMongo().auth(params);
+ if (good) {
+ // Auth is enabled; the shell should try isMaster and replSetGetStatus to build the prompt.
+ this.getMongo().authStatus = {authRequired: true, isMaster: true, replSetGetStatus: true};
+ }
- DB.prototype.getUsers = function(args) {
- var cmdObj = {usersInfo: 1};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- var authSchemaIncompatibleCode = 69;
- if (res.code == authSchemaIncompatibleCode ||
- (res.code == null && res.errmsg == "no such cmd: usersInfo")) {
- // Working with 2.4 schema user data
- return this.system.users.find({}).toArray();
- }
+ return good;
+};
- throw _getErrorWithCode(res, res.errmsg);
- }
+DB.prototype.auth = function() {
+ try {
+ this._authOrThrow.apply(this, arguments);
+ } catch (ex) {
+ print(ex);
+ return 0;
+ }
+ return 1;
+};
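For reference, a usage sketch of both calling conventions accepted by _authOrThrow above (the user name and password are illustrative and must already exist on this database):

db.auth("appUser", "secret");                      // (username, password) form
db.auth({user: "appUser", pwd: "secret", mechanism: "SCRAM-SHA-256"});  // parameter-object form
// Either call returns 1 on success and 0 on failure, printing the error in the failure case.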
- return res.users;
+DB.prototype.grantRolesToUser = function(username, roles, writeConcern) {
+ var cmdObj = {
+ grantRolesToUser: username,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.createRole = function(roleObj, writeConcern) {
- var name = roleObj["role"];
- var cmdObj = {createRole: name};
- cmdObj = Object.extend(cmdObj, roleObj);
- delete cmdObj["role"];
- cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
-
- var res = this.runCommand(cmdObj);
-
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- printjson(roleObj);
+DB.prototype.revokeRolesFromUser = function(username, roles, writeConcern) {
+ var cmdObj = {
+ revokeRolesFromUser: username,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.updateRole = function(name, updateObject, writeConcern) {
- var cmdObj = {updateRole: name};
- cmdObj = Object.extend(cmdObj, updateObject);
- cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.getUser = function(username, args) {
+ if (typeof username != "string") {
+ throw Error("User name for getUser shell helper must be a string");
+ }
+ var cmdObj = {usersInfo: username};
+ Object.extend(cmdObj, args);
- DB.prototype.dropRole = function(name, writeConcern) {
- var cmdObj = {
- dropRole: name,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- if (res.ok) {
- return true;
- }
+ if (res.users.length == 0) {
+ return null;
+ }
+ return res.users[0];
+};
- if (res.code == 31) { // Code 31 = RoleNotFound
- return false;
+DB.prototype.getUsers = function(args) {
+ var cmdObj = {usersInfo: 1};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ var authSchemaIncompatibleCode = 69;
+ if (res.code == authSchemaIncompatibleCode ||
+ (res.code == null && res.errmsg == "no such cmd: usersInfo")) {
+ // Working with 2.4 schema user data
+ return this.system.users.find({}).toArray();
}
throw _getErrorWithCode(res, res.errmsg);
- };
-
- DB.prototype.dropAllRoles = function(writeConcern) {
- var res = this.runCommand({
- dropAllRolesFromDatabase: 1,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- });
-
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ }
- return res.n;
- };
+ return res.users;
+};
- DB.prototype.grantRolesToRole = function(rolename, roles, writeConcern) {
- var cmdObj = {
- grantRolesToRole: rolename,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.createRole = function(roleObj, writeConcern) {
+ var name = roleObj["role"];
+ var cmdObj = {createRole: name};
+ cmdObj = Object.extend(cmdObj, roleObj);
+ delete cmdObj["role"];
+ cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
- DB.prototype.revokeRolesFromRole = function(rolename, roles, writeConcern) {
- var cmdObj = {
- revokeRolesFromRole: rolename,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ var res = this.runCommand(cmdObj);
- DB.prototype.grantPrivilegesToRole = function(rolename, privileges, writeConcern) {
- var cmdObj = {
- grantPrivilegesToRole: rolename,
- privileges: privileges,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+ printjson(roleObj);
+};
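The role helpers above all follow the same pattern of wrapping a server command and throwing on failure; as a hedged example of the roleObj shape createRole expects (the role name, resource, and actions are illustrative):

db.createRole({
    role: "readAppLogs",  // moved into the createRole command field by the helper
    privileges: [{resource: {db: "app", collection: "logs"}, actions: ["find"]}],
    roles: []             // no inherited roles
});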
+
+DB.prototype.updateRole = function(name, updateObject, writeConcern) {
+ var cmdObj = {updateRole: name};
+ cmdObj = Object.extend(cmdObj, updateObject);
+ cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.revokePrivilegesFromRole = function(rolename, privileges, writeConcern) {
- var cmdObj = {
- revokePrivilegesFromRole: rolename,
- privileges: privileges,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.dropRole = function(name, writeConcern) {
+ var cmdObj = {dropRole: name, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern};
+ var res = this.runCommand(cmdObj);
- DB.prototype.getRole = function(rolename, args) {
- if (typeof rolename != "string") {
- throw Error("Role name for getRole shell helper must be a string");
- }
- var cmdObj = {rolesInfo: rolename};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (res.ok) {
+ return true;
+ }
- if (res.roles.length == 0) {
- return null;
- }
- return res.roles[0];
- };
+ if (res.code == 31) { // Code 31 = RoleNotFound
+ return false;
+ }
- DB.prototype.getRoles = function(args) {
- var cmdObj = {rolesInfo: 1};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ throw _getErrorWithCode(res, res.errmsg);
+};
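A short sketch of the dropRole contract above: true when the role was dropped, false when the server reports RoleNotFound (code 31), and a thrown error for anything else (the role name is illustrative):

if (db.dropRole("readAppLogs")) {
    print("role removed");
} else {
    print("role did not exist");  // server returned code 31
}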
- return res.roles;
- };
+DB.prototype.dropAllRoles = function(writeConcern) {
+ var res = this.runCommand({
+ dropAllRolesFromDatabase: 1,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
+ });
- DB.prototype.setWriteConcern = function(wc) {
- if (wc instanceof WriteConcern) {
- this._writeConcern = wc;
- } else {
- this._writeConcern = new WriteConcern(wc);
- }
- };
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.getWriteConcern = function() {
- if (this._writeConcern)
- return this._writeConcern;
+ return res.n;
+};
- {
- const session = this.getSession();
- return session._getSessionAwareClient().getWriteConcern(session);
- }
+DB.prototype.grantRolesToRole = function(rolename, roles, writeConcern) {
+ var cmdObj = {
+ grantRolesToRole: rolename,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.unsetWriteConcern = function() {
- delete this._writeConcern;
+DB.prototype.revokeRolesFromRole = function(rolename, roles, writeConcern) {
+ var cmdObj = {
+ revokeRolesFromRole: rolename,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.getLogComponents = function() {
- return this.getMongo().getLogComponents(this.getSession());
+DB.prototype.grantPrivilegesToRole = function(rolename, privileges, writeConcern) {
+ var cmdObj = {
+ grantPrivilegesToRole: rolename,
+ privileges: privileges,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.setLogLevel = function(logLevel, component) {
- return this.getMongo().setLogLevel(logLevel, component, this.getSession());
+DB.prototype.revokePrivilegesFromRole = function(rolename, privileges, writeConcern) {
+ var cmdObj = {
+ revokePrivilegesFromRole: rolename,
+ privileges: privileges,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
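grantPrivilegesToRole and revokePrivilegesFromRole both take an array of privilege documents; a minimal sketch of that shape (the role, resource, and actions are illustrative):

db.grantPrivilegesToRole("readAppLogs", [
    {resource: {db: "app", collection: "logs"}, actions: ["find", "listIndexes"]}
]);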
- DB.prototype.watch = function(pipeline, options) {
- pipeline = pipeline || [];
- assert(pipeline instanceof Array, "'pipeline' argument must be an array");
-
- let changeStreamStage;
- [changeStreamStage, aggOptions] = this.getMongo()._extractChangeStreamOptions(options);
- pipeline.unshift(changeStreamStage);
- return this._runAggregate({aggregate: 1, pipeline: pipeline}, aggOptions);
- };
+DB.prototype.getRole = function(rolename, args) {
+ if (typeof rolename != "string") {
+ throw Error("Role name for getRole shell helper must be a string");
+ }
+ var cmdObj = {rolesInfo: rolename};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.getFreeMonitoringStatus = function() {
- 'use strict';
- return assert.commandWorked(this.adminCommand({getFreeMonitoringStatus: 1}));
- };
+ if (res.roles.length == 0) {
+ return null;
+ }
+ return res.roles[0];
+};
+
+DB.prototype.getRoles = function(args) {
+ var cmdObj = {rolesInfo: 1};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.enableFreeMonitoring = function() {
- 'use strict';
- const isMaster = this.isMaster();
- if (isMaster.ismaster == false) {
- print("ERROR: db.enableFreeMonitoring() may only be run on a primary");
- return;
- }
+ return res.roles;
+};
- assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'enable'}));
+DB.prototype.setWriteConcern = function(wc) {
+ if (wc instanceof WriteConcern) {
+ this._writeConcern = wc;
+ } else {
+ this._writeConcern = new WriteConcern(wc);
+ }
+};
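Because setWriteConcern wraps a plain document in a WriteConcern itself, these two calls are equivalent (the values are illustrative):

db.setWriteConcern({w: "majority", wtimeout: 5000});                    // wrapped by the helper
db.setWriteConcern(new WriteConcern({w: "majority", wtimeout: 5000}));  // pre-wrapped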
- const cmd = this.adminCommand({getFreeMonitoringStatus: 1});
- if (!cmd.ok && (cmd.code == ErrorCode.Unauthorized)) {
- // Edge case: It's technically possible that a user can change free-mon state,
- // but is not allowed to inspect it.
- print("Successfully initiated free monitoring, but unable to determine status " +
- "as you lack the 'checkFreeMonitoringStatus' privilege.");
- return;
- }
- assert.commandWorked(cmd);
+DB.prototype.getWriteConcern = function() {
+ if (this._writeConcern)
+ return this._writeConcern;
- if (cmd.state !== 'enabled') {
- const url = this.adminCommand({'getParameter': 1, 'cloudFreeMonitoringEndpointURL': 1})
- .cloudFreeMonitoringEndpointURL;
+ {
+ const session = this.getSession();
+ return session._getSessionAwareClient().getWriteConcern(session);
+ }
+};
+
+DB.prototype.unsetWriteConcern = function() {
+ delete this._writeConcern;
+};
+
+DB.prototype.getLogComponents = function() {
+ return this.getMongo().getLogComponents(this.getSession());
+};
+
+DB.prototype.setLogLevel = function(logLevel, component) {
+ return this.getMongo().setLogLevel(logLevel, component, this.getSession());
+};
+
+DB.prototype.watch = function(pipeline, options) {
+ pipeline = pipeline || [];
+ assert(pipeline instanceof Array, "'pipeline' argument must be an array");
+
+ let changeStreamStage;
+ [changeStreamStage, aggOptions] = this.getMongo()._extractChangeStreamOptions(options);
+ pipeline.unshift(changeStreamStage);
+ return this._runAggregate({aggregate: 1, pipeline: pipeline}, aggOptions);
+};
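A usage sketch for db.watch(), assuming a connection to a replica set (change streams are not available on standalone servers); the $match stage is illustrative:

const cursor = db.watch([{$match: {operationType: "insert"}}]);
while (cursor.hasNext()) {
    printjson(cursor.next());  // events span every collection in this database
}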
+
+DB.prototype.getFreeMonitoringStatus = function() {
+ 'use strict';
+ return assert.commandWorked(this.adminCommand({getFreeMonitoringStatus: 1}));
+};
+
+DB.prototype.enableFreeMonitoring = function() {
+ 'use strict';
+ const isMaster = this.isMaster();
+ if (isMaster.ismaster == false) {
+ print("ERROR: db.enableFreeMonitoring() may only be run on a primary");
+ return;
+ }
- print("Unable to get immediate response from the Cloud Monitoring service. We will" +
- "continue to retry in the background. Please check your firewall " +
- "settings to ensure that mongod can communicate with \"" + url + "\"");
- return;
- }
+ assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'enable'}));
- print(tojson(cmd));
- };
+ const cmd = this.adminCommand({getFreeMonitoringStatus: 1});
+ if (!cmd.ok && (cmd.code == ErrorCode.Unauthorized)) {
+ // Edge case: It's technically possible that a user can change free-mon state,
+ // but is not allowed to inspect it.
+ print("Successfully initiated free monitoring, but unable to determine status " +
+ "as you lack the 'checkFreeMonitoringStatus' privilege.");
+ return;
+ }
+ assert.commandWorked(cmd);
- DB.prototype.disableFreeMonitoring = function() {
- 'use strict';
- assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'disable'}));
- };
+ if (cmd.state !== 'enabled') {
+ const url = this.adminCommand({'getParameter': 1, 'cloudFreeMonitoringEndpointURL': 1})
+ .cloudFreeMonitoringEndpointURL;
- // Writing `this.hasOwnProperty` would cause DB.prototype.getCollection() to be called since the
- // DB's getProperty() handler in C++ takes precedence when a property isn't defined on the DB
- // instance directly. The "hasOwnProperty" property is defined on Object.prototype, so we must
- // resort to using the function explicitly ourselves.
- (function(hasOwnProperty) {
- DB.prototype.getSession = function() {
- if (!hasOwnProperty.call(this, "_session")) {
- this._session = this.getMongo()._getDefaultSession();
- }
- return this._session;
- };
- })(Object.prototype.hasOwnProperty);
+ print("Unable to get immediate response from the Cloud Monitoring service. We will" +
+ "continue to retry in the background. Please check your firewall " +
+ "settings to ensure that mongod can communicate with \"" + url + "\"");
+ return;
+ }
+ print(tojson(cmd));
+};
+
+DB.prototype.disableFreeMonitoring = function() {
+ 'use strict';
+ assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'disable'}));
+};
+
+// Writing `this.hasOwnProperty` would cause DB.prototype.getCollection() to be called since the
+// DB's getProperty() handler in C++ takes precedence when a property isn't defined on the DB
+// instance directly. The "hasOwnProperty" property is defined on Object.prototype, so we must
+// resort to using the function explicitly ourselves.
+(function(hasOwnProperty) {
+DB.prototype.getSession = function() {
+ if (!hasOwnProperty.call(this, "_session")) {
+ this._session = this.getMongo()._getDefaultSession();
+ }
+ return this._session;
+};
+})(Object.prototype.hasOwnProperty);
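To make the capture above concrete, a sketch of the failure mode it avoids (the property name checked is the real one used by getSession):

// Accessing db.hasOwnProperty would go through the DB's C++ getProperty() handler
// and resolve to a collection named "hasOwnProperty", so the inherited function
// must be applied explicitly:
Object.prototype.hasOwnProperty.call(db, "_session");  // correct membership test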
}());
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index 4f4e55669e5..12911840cb9 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -175,7 +175,7 @@ enum ShellExitCode : int {
};
Scope* shellMainScope;
-}
+} // namespace mongo
bool isSessionTimedOut() {
static Date_t previousCommandTime = Date_t::now();
@@ -794,8 +794,8 @@ int _main(int argc, char* argv[], char** envp) {
#else
wchar_t programDataPath[MAX_PATH];
if (S_OK == SHGetFolderPathW(nullptr, CSIDL_COMMON_APPDATA, nullptr, 0, programDataPath)) {
- rcGlobalLocation = str::stream() << toUtf8String(programDataPath)
- << "\\MongoDB\\mongorc.js";
+ rcGlobalLocation = str::stream()
+ << toUtf8String(programDataPath) << "\\MongoDB\\mongorc.js";
}
#endif
if (!rcGlobalLocation.empty() && ::mongo::shell_utils::fileExists(rcGlobalLocation)) {
@@ -875,9 +875,9 @@ int _main(int argc, char* argv[], char** envp) {
rcLocation = str::stream() << getenv("HOME") << "/.mongorc.js";
#else
if (getenv("HOMEDRIVE") != nullptr && getenv("HOMEPATH") != nullptr)
- rcLocation = str::stream() << toUtf8String(_wgetenv(L"HOMEDRIVE"))
- << toUtf8String(_wgetenv(L"HOMEPATH"))
- << "\\.mongorc.js";
+ rcLocation = str::stream()
+ << toUtf8String(_wgetenv(L"HOMEDRIVE")) << toUtf8String(_wgetenv(L"HOMEPATH"))
+ << "\\.mongorc.js";
#endif
if (!rcLocation.empty() && ::mongo::shell_utils::fileExists(rcLocation)) {
hasMongoRC = true;
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index bec5bb8ae5e..be82f6b97bb 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -138,7 +138,7 @@ BSONObj EncryptedDBClientBase::encryptDecryptCommand(const BSONObj& object,
uassert(31096,
"Object too deep to be encrypted. Exceeded stack depth.",
frameStack.size() < BSONDepth::kDefaultMaxAllowableDepth);
- auto & [ iterator, builder ] = frameStack.top();
+ auto& [iterator, builder] = frameStack.top();
if (iterator.more()) {
BSONElement elem = iterator.next();
if (elem.type() == BSONType::Object) {
@@ -609,7 +609,7 @@ std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKey(const UUID& uuid
auto ts_new = Date_t::now();
if (_datakeyCache.hasKey(uuid)) {
- auto[key, ts] = _datakeyCache.find(uuid)->second;
+ auto [key, ts] = _datakeyCache.find(uuid)->second;
if (ts_new - ts < kCacheInvalidationTime) {
return key;
} else {
diff --git a/src/mongo/shell/encrypted_shell_options.h b/src/mongo/shell/encrypted_shell_options.h
index f839c637d9a..b4b30aba2fe 100644
--- a/src/mongo/shell/encrypted_shell_options.h
+++ b/src/mongo/shell/encrypted_shell_options.h
@@ -42,4 +42,4 @@ struct EncryptedShellGlobalParams {
};
extern EncryptedShellGlobalParams encryptedShellGlobalParams;
-}
+} // namespace mongo
diff --git a/src/mongo/shell/explain_query.js b/src/mongo/shell/explain_query.js
index 78e57c86e69..89a922e225a 100644
--- a/src/mongo/shell/explain_query.js
+++ b/src/mongo/shell/explain_query.js
@@ -4,7 +4,6 @@
//
var DBExplainQuery = (function() {
-
//
// Private methods.
//
@@ -15,7 +14,7 @@ var DBExplainQuery = (function() {
* is implemented here for backwards compatibility.
*/
function removeVerboseFields(obj) {
- if (typeof(obj) !== "object") {
+ if (typeof (obj) !== "object") {
return;
}
@@ -23,7 +22,7 @@ var DBExplainQuery = (function() {
delete obj.oldPlan;
delete obj.stats;
- if (typeof(obj.length) === "number") {
+ if (typeof (obj.length) === "number") {
for (var i = 0; i < obj.length; i++) {
removeVerboseFields(obj[i]);
}
diff --git a/src/mongo/shell/explainable.js b/src/mongo/shell/explainable.js
index 637d19d2bf7..4f32af22221 100644
--- a/src/mongo/shell/explainable.js
+++ b/src/mongo/shell/explainable.js
@@ -4,7 +4,6 @@
//
var Explainable = (function() {
-
var parseVerbosity = function(verbosity) {
// Truthy non-strings are interpreted as "allPlansExecution" verbosity.
if (verbosity && (typeof verbosity !== "string")) {
@@ -19,8 +18,10 @@ var Explainable = (function() {
// If we're here, then the verbosity is a string. We reject invalid strings.
if (verbosity !== "queryPlanner" && verbosity !== "executionStats" &&
verbosity !== "allPlansExecution") {
- throw Error("explain verbosity must be one of {" + "'queryPlanner'," +
- "'executionStats'," + "'allPlansExecution'}");
+ throw Error("explain verbosity must be one of {" +
+ "'queryPlanner'," +
+ "'executionStats'," +
+ "'allPlansExecution'}");
}
return verbosity;
diff --git a/src/mongo/shell/kms_aws.cpp b/src/mongo/shell/kms_aws.cpp
index a0c3ecffe06..b923a59355c 100644
--- a/src/mongo/shell/kms_aws.cpp
+++ b/src/mongo/shell/kms_aws.cpp
@@ -449,7 +449,7 @@ public:
}
};
-} // namspace
+} // namespace
MONGO_INITIALIZER(KMSRegister)(::mongo::InitializerContext* context) {
kms_message_init();
diff --git a/src/mongo/shell/kms_local.cpp b/src/mongo/shell/kms_local.cpp
index 628ea9ed9c2..32d5f760383 100644
--- a/src/mongo/shell/kms_local.cpp
+++ b/src/mongo/shell/kms_local.cpp
@@ -143,7 +143,7 @@ public:
}
};
-} // namspace
+} // namespace
MONGO_INITIALIZER(LocalKMSRegister)(::mongo::InitializerContext* context) {
KMSServiceController::registerFactory(KMSProviderEnum::local,
diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp
index 501103aae4a..ed8910ac73c 100644
--- a/src/mongo/shell/linenoise.cpp
+++ b/src/mongo/shell/linenoise.cpp
@@ -126,16 +126,16 @@ using std::vector;
using std::unique_ptr;
-using linenoise_utf8::UChar8;
-using linenoise_utf8::UChar32;
-using linenoise_utf8::copyString8to32;
using linenoise_utf8::copyString32;
using linenoise_utf8::copyString32to8;
+using linenoise_utf8::copyString8to32;
using linenoise_utf8::strlen32;
using linenoise_utf8::strncmp32;
-using linenoise_utf8::write32;
-using linenoise_utf8::Utf8String;
+using linenoise_utf8::UChar32;
+using linenoise_utf8::UChar8;
using linenoise_utf8::Utf32String;
+using linenoise_utf8::Utf8String;
+using linenoise_utf8::write32;
struct linenoiseCompletions {
vector<Utf32String> completionStrings;
@@ -1234,7 +1234,7 @@ static UChar32 setMetaRoutine(UChar32 c) {
return doDispatch(c, initialDispatch);
}
-} // namespace EscapeSequenceProcessing // move these out of global namespace
+} // namespace EscapeSequenceProcessing
#endif // #ifndef _WIN32
diff --git a/src/mongo/shell/linenoise_utf8.h b/src/mongo/shell/linenoise_utf8.h
index d5d4c6db7d9..dca7a8b0ef4 100644
--- a/src/mongo/shell/linenoise_utf8.h
+++ b/src/mongo/shell/linenoise_utf8.h
@@ -141,10 +141,7 @@ struct UtfStringMixin {
UtfStringMixin() : _len(0), _cap(0), _chars(0) {}
UtfStringMixin(const UtfStringMixin& other) // copies like std::string
- : _len(other._len),
- _cap(other._len + 1),
- _chars(other._chars),
- _str(new char_t[_cap]) {
+ : _len(other._len), _cap(other._len + 1), _chars(other._chars), _str(new char_t[_cap]) {
memcpy(_str.get(), other._str.get(), _cap * sizeof(char_t));
}
diff --git a/src/mongo/shell/mk_wcwidth.cpp b/src/mongo/shell/mk_wcwidth.cpp
index cb4674344f5..1a09cc2e874 100644
--- a/src/mongo/shell/mk_wcwidth.cpp
+++ b/src/mongo/shell/mk_wcwidth.cpp
@@ -177,15 +177,15 @@ int mk_wcwidth(int ucs) {
return 1 +
(ucs >= 0x1100 &&
(ucs <= 0x115f || /* Hangul Jamo init. consonants */
- ucs == 0x2329 ||
- ucs == 0x232a || (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) || /* CJK ... Yi */
- (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */
- (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */
- (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */
- (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */
- (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */
- (ucs >= 0xffe0 && ucs <= 0xffe6) ||
- (ucs >= 0x20000 && ucs <= 0x2fffd) || (ucs >= 0x30000 && ucs <= 0x3fffd)));
+ ucs == 0x2329 || ucs == 0x232a ||
+ (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) || /* CJK ... Yi */
+ (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */
+ (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */
+ (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */
+ (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */
+ (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */
+ (ucs >= 0xffe0 && ucs <= 0xffe6) || (ucs >= 0x20000 && ucs <= 0x2fffd) ||
+ (ucs >= 0x30000 && ucs <= 0x3fffd)));
}
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
index 39dbd402f7d..481aff7c6ad 100644
--- a/src/mongo/shell/mongo.js
+++ b/src/mongo/shell/mongo.js
@@ -45,7 +45,7 @@ Mongo.prototype.getDB = function(name) {
// There is a weird issue where typeof(db._name) !== "string" when the db name
// is created from objects returned from native C++ methods.
// This hack ensures that the db._name is always a string.
- if (typeof(name) === "object") {
+ if (typeof (name) === "object") {
name = name.toString();
}
return new DB(this, name);
@@ -84,7 +84,6 @@ Mongo.prototype.getDBs = function(driverSession = this._getDefaultSession(),
filter = undefined,
nameOnly = undefined,
authorizedDatabases = undefined) {
-
return function(driverSession, filter, nameOnly, authorizedDatabases) {
'use strict';
@@ -227,7 +226,7 @@ Mongo.prototype.tojson = Mongo.prototype.toString;
* Note that this object only keeps a shallow copy of this array.
*/
Mongo.prototype.setReadPref = function(mode, tagSet) {
- if ((this._readPrefMode === "primary") && (typeof(tagSet) !== "undefined") &&
+ if ((this._readPrefMode === "primary") && (typeof (tagSet) !== "undefined") &&
(Object.keys(tagSet).length > 0)) {
// we allow empty arrays/objects or no tagSet for compatibility reasons
throw Error("Can not supply tagSet with readPref mode primary");
@@ -252,7 +251,7 @@ Mongo.prototype.getReadPrefTagSet = function() {
// Returns a readPreference object of the type expected by mongos.
Mongo.prototype.getReadPref = function() {
var obj = {}, mode, tagSet;
- if (typeof(mode = this.getReadPrefMode()) === "string") {
+ if (typeof (mode = this.getReadPrefMode()) === "string") {
obj.mode = mode;
} else {
return null;
@@ -381,7 +380,8 @@ connect = function(url, user, pass) {
return db;
};
-/** deprecated, use writeMode below
+/**
+ * deprecated, use writeMode below
*
*/
Mongo.prototype.useWriteCommands = function() {
@@ -410,7 +410,6 @@ Mongo.prototype.hasExplainCommand = function() {
*/
Mongo.prototype.writeMode = function() {
-
if ('_writeMode' in this) {
return this._writeMode;
}
@@ -539,8 +538,8 @@ Mongo.prototype.startSession = function startSession(options = {}) {
// Only log this message if we are running a test
if (typeof TestData === "object" && TestData.testName) {
jsTest.log("New session started with sessionID: " +
- tojsononeline(newDriverSession.getSessionId()) + " and options: " +
- tojsononeline(options));
+ tojsononeline(newDriverSession.getSessionId()) +
+ " and options: " + tojsononeline(options));
}
return newDriverSession;
@@ -560,7 +559,7 @@ Mongo.prototype._getDefaultSession = function getDefaultSession() {
this._setDummyDefaultSession();
} else {
print("ERROR: Implicit session failed: " + e.message);
- throw(e);
+ throw (e);
}
}
} else {
diff --git a/src/mongo/shell/query.js b/src/mongo/shell/query.js
index 7bd167b5437..bd451166e9d 100644
--- a/src/mongo/shell/query.js
+++ b/src/mongo/shell/query.js
@@ -2,7 +2,6 @@
if (typeof DBQuery == "undefined") {
DBQuery = function(mongo, db, collection, ns, query, fields, limit, skip, batchSize, options) {
-
this._mongo = mongo; // 0
this._db = db; // 1
this._collection = collection; // 2
@@ -399,8 +398,8 @@ DBQuery.prototype.countReturn = function() {
};
/**
-* iterative count - only for testing
-*/
+ * iterative count - only for testing
+ */
DBQuery.prototype.itcount = function() {
var num = 0;
@@ -555,7 +554,6 @@ DBQuery.prototype.shellPrint = function() {
} catch (e) {
print(e);
}
-
};
DBQuery.prototype.toString = function() {
@@ -567,12 +565,12 @@ DBQuery.prototype.toString = function() {
//
/**
-* Get partial results from a mongos if some shards are down (instead of throwing an error).
-*
-* @method
-* @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
-* @return {DBQuery}
-*/
+ * Get partial results from a mongos if some shards are down (instead of throwing an error).
+ *
+ * @method
+ * @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
+ * @return {DBQuery}
+ */
DBQuery.prototype.allowPartialResults = function() {
this._checkModify();
this.addOption(DBQuery.Option.partial);
@@ -580,13 +578,13 @@ DBQuery.prototype.allowPartialResults = function() {
};
/**
-* The server normally times out idle cursors after an inactivity period (10 minutes)
-* to prevent excess memory use. Set this option to prevent that.
-*
-* @method
-* @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
-* @return {DBQuery}
-*/
+ * The server normally times out idle cursors after an inactivity period (10 minutes)
+ * to prevent excess memory use. Set this option to prevent that.
+ *
+ * @method
+ * @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
+ * @return {DBQuery}
+ */
DBQuery.prototype.noCursorTimeout = function() {
this._checkModify();
this.addOption(DBQuery.Option.noTimeout);
@@ -594,13 +592,13 @@ DBQuery.prototype.noCursorTimeout = function() {
};
/**
-* Limits the fields to return for all matching documents.
-*
-* @method
-* @see http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/
-* @param {object} document Document specifying the projection of the resulting documents.
-* @return {DBQuery}
-*/
+ * Limits the fields to return for all matching documents.
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/
+ * @param {object} document Document specifying the projection of the resulting documents.
+ * @return {DBQuery}
+ */
DBQuery.prototype.projection = function(document) {
this._checkModify();
this._fields = document;
@@ -608,14 +606,14 @@ DBQuery.prototype.projection = function(document) {
};
/**
-* Specify cursor as a tailable cursor, allowing to specify if it will use awaitData
-*
-* @method
-* @see http://docs.mongodb.org/manual/tutorial/create-tailable-cursor/
-* @param {boolean} [awaitData=true] cursor blocks for a few seconds to wait for data if no documents
-*found.
-* @return {DBQuery}
-*/
+ * Specify cursor as a tailable cursor, allowing to specify if it will use awaitData
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/tutorial/create-tailable-cursor/
+ * @param {boolean} [awaitData=true] cursor blocks for a few seconds to wait for data if no
+ * documents are found.
+ * @return {DBQuery}
+ */
DBQuery.prototype.tailable = function(awaitData) {
this._checkModify();
this.addOption(DBQuery.Option.tailable);
@@ -629,13 +627,13 @@ DBQuery.prototype.tailable = function(awaitData) {
};
/**
-* Specify a document containing modifiers for the query.
-*
-* @method
-* @see http://docs.mongodb.org/manual/reference/operator/query-modifier/
-* @param {object} document A document containing modifers to apply to the cursor.
-* @return {DBQuery}
-*/
+ * Specify a document containing modifiers for the query.
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/reference/operator/query-modifier/
+ * @param {object} document A document containing modifiers to apply to the cursor.
+ * @return {DBQuery}
+ */
DBQuery.prototype.modifiers = function(document) {
this._checkModify();
@@ -802,16 +800,16 @@ DBCommandCursor.prototype._runGetMoreCommand = function() {
assert.commandWorked(cmdRes, () => "getMore command failed: " + tojson(cmdRes));
if (this._ns !== cmdRes.cursor.ns) {
- throw Error("unexpected collection in getMore response: " + this._ns + " != " +
- cmdRes.cursor.ns);
+ throw Error("unexpected collection in getMore response: " + this._ns +
+ " != " + cmdRes.cursor.ns);
}
if (!cmdRes.cursor.id.compare(NumberLong("0"))) {
this._cursorHandle.zeroCursorId();
this._cursorid = NumberLong("0");
} else if (this._cursorid.compare(cmdRes.cursor.id)) {
- throw Error("unexpected cursor id: " + this._cursorid.toString() + " != " +
- cmdRes.cursor.id.toString());
+ throw Error("unexpected cursor id: " + this._cursorid.toString() +
+ " != " + cmdRes.cursor.id.toString());
}
// If the command result represents a change stream cursor, update our postBatchResumeToken.
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 525be55a6fc..5e8a14f7d3c 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -282,13 +282,14 @@ var ReplSetTest = function(opts) {
if (status.members[i].name == node.host || status.members[i].name == node.name) {
for (var j = 0; j < states.length; j++) {
if (printStatus) {
- print("Status -- " + " current state: " + status.members[i][ind] +
+ print("Status -- " +
+ " current state: " + status.members[i][ind] +
", target state : " + states[j]);
}
- if (typeof(states[j]) != "number") {
- throw new Error("State was not an number -- type:" + typeof(states[j]) +
- ", value:" + states[j]);
+ if (typeof (states[j]) != "number") {
+ throw new Error("State was not an number -- type:" +
+ typeof (states[j]) + ", value:" + states[j]);
}
if (status.members[i][ind] == states[j]) {
foundState = states[j];
@@ -299,7 +300,6 @@ var ReplSetTest = function(opts) {
}
return false;
-
}, "waiting for state indicator " + ind + " for " + timeout + "ms", timeout);
// If we were waiting for the node to step down, wait until we can connect to it again,
@@ -894,7 +894,6 @@ var ReplSetTest = function(opts) {
* and returns the 'config' object unchanged. Does not affect 'config' when running CSRS.
*/
this._updateConfigIfNotDurable = function(config) {
-
// Get a replica set node (check for use of bridge).
var replNode = _useBridge ? _unbridgedNodes[0] : this.nodes[0];
@@ -936,9 +935,9 @@ var ReplSetTest = function(opts) {
const result = assert.commandWorkedOrFailedWithCode(
master.runCommand(cmd),
[
- ErrorCodes.NodeNotFound,
- ErrorCodes.NewReplicaSetConfigurationIncompatible,
- ErrorCodes.InterruptedDueToReplStateChange
+ ErrorCodes.NodeNotFound,
+ ErrorCodes.NewReplicaSetConfigurationIncompatible,
+ ErrorCodes.InterruptedDueToReplStateChange
],
errorMsg);
return result.ok;
@@ -1032,7 +1031,7 @@ var ReplSetTest = function(opts) {
} else {
Object.keys(self.nodeOptions).forEach(function(key, index) {
let val = self.nodeOptions[key];
- if (typeof(val) === "object" &&
+ if (typeof (val) === "object" &&
(val.hasOwnProperty("shardsvr") ||
val.hasOwnProperty("binVersion") &&
// Should not wait for keys if version is less than 3.6
@@ -1043,7 +1042,7 @@ var ReplSetTest = function(opts) {
});
if (self.startOptions != undefined) {
let val = self.startOptions;
- if (typeof(val) === "object" &&
+ if (typeof (val) === "object" &&
(val.hasOwnProperty("shardsvr") ||
val.hasOwnProperty("binVersion") &&
// Should not wait for keys if version is less than 3.6
@@ -1564,7 +1563,8 @@ var ReplSetTest = function(opts) {
this.getHashesUsingSessions = function(sessions, dbName, {
filterCapped: filterCapped = true,
- filterMapReduce: filterMapReduce = true, readAtClusterTime,
+ filterMapReduce: filterMapReduce = true,
+ readAtClusterTime,
} = {}) {
return sessions.map(session => {
const commandObj = {dbHash: 1};
@@ -1896,7 +1896,8 @@ var ReplSetTest = function(opts) {
primarySession, secondarySession, dbName, collName);
for (let {
- primary: primaryDoc, secondary: secondaryDoc,
+ primary: primaryDoc,
+ secondary: secondaryDoc,
} of diff.docsWithDifferentContents) {
print(`Mismatching documents between the primary ${primary.host}` +
` and the secondary ${secondary.host}:`);
@@ -1994,7 +1995,6 @@ var ReplSetTest = function(opts) {
dumpCollectionDiff(primary, secondary, dbName, collName);
success = false;
}
-
});
// Check that collection information is consistent on the primary and
@@ -2363,7 +2363,7 @@ var ReplSetTest = function(opts) {
// Turn off periodic noop writes for replica sets by default.
options.setParameter = options.setParameter || {};
- if (typeof(options.setParameter) === "string") {
+ if (typeof (options.setParameter) === "string") {
var eqIdx = options.setParameter.indexOf("=");
if (eqIdx != -1) {
var param = options.setParameter.substring(0, eqIdx);
diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js
index 4ebf51ec693..e4633c42a7e 100644
--- a/src/mongo/shell/servers.js
+++ b/src/mongo/shell/servers.js
@@ -2,1112 +2,1192 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
myPort;
(function() {
- "use strict";
+"use strict";
- var shellVersion = version;
+var shellVersion = version;
- // Record the exit codes of mongod and mongos processes that crashed during startup keyed by
- // port. This map is cleared when MongoRunner._startWithArgs and MongoRunner.stopMongod/s are
- // called.
- var serverExitCodeMap = {};
+// Record the exit codes of mongod and mongos processes that crashed during startup, keyed by
+// port. This map is cleared when MongoRunner._startWithArgs and MongoRunner.stopMongod/s are
+// called.
+var serverExitCodeMap = {};
- var _parsePath = function() {
- var dbpath = "";
- for (var i = 0; i < arguments.length; ++i)
- if (arguments[i] == "--dbpath")
- dbpath = arguments[i + 1];
+var _parsePath = function() {
+ var dbpath = "";
+ for (var i = 0; i < arguments.length; ++i)
+ if (arguments[i] == "--dbpath")
+ dbpath = arguments[i + 1];
- if (dbpath == "")
- throw Error("No dbpath specified");
+ if (dbpath == "")
+ throw Error("No dbpath specified");
- return dbpath;
- };
+ return dbpath;
+};
- var _parsePort = function() {
- var port = "";
- for (var i = 0; i < arguments.length; ++i)
- if (arguments[i] == "--port")
- port = arguments[i + 1];
+var _parsePort = function() {
+ var port = "";
+ for (var i = 0; i < arguments.length; ++i)
+ if (arguments[i] == "--port")
+ port = arguments[i + 1];
- if (port == "")
- throw Error("No port specified");
- return port;
- };
+ if (port == "")
+ throw Error("No port specified");
+ return port;
+};
- var createMongoArgs = function(binaryName, args) {
- if (!Array.isArray(args)) {
- throw new Error("The second argument to createMongoArgs must be an array");
- }
+var createMongoArgs = function(binaryName, args) {
+ if (!Array.isArray(args)) {
+ throw new Error("The second argument to createMongoArgs must be an array");
+ }
- var fullArgs = [binaryName];
-
- if (args.length == 1 && isObject(args[0])) {
- var o = args[0];
- for (var k in o) {
- if (o.hasOwnProperty(k)) {
- if (k == "v" && isNumber(o[k])) {
- var n = o[k];
- if (n > 0) {
- if (n > 10)
- n = 10;
- var temp = "-";
- while (n-- > 0)
- temp += "v";
- fullArgs.push(temp);
- }
- } else {
- fullArgs.push("--" + k);
- if (o[k] != "")
- fullArgs.push("" + o[k]);
+ var fullArgs = [binaryName];
+
+ if (args.length == 1 && isObject(args[0])) {
+ var o = args[0];
+ for (var k in o) {
+ if (o.hasOwnProperty(k)) {
+ if (k == "v" && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10)
+ n = 10;
+ var temp = "-";
+ while (n-- > 0)
+ temp += "v";
+ fullArgs.push(temp);
}
+ } else {
+ fullArgs.push("--" + k);
+ if (o[k] != "")
+ fullArgs.push("" + o[k]);
}
}
- } else {
- for (var i = 0; i < args.length; i++)
- fullArgs.push(args[i]);
}
+ } else {
+ for (var i = 0; i < args.length; i++)
+ fullArgs.push(args[i]);
+ }
- return fullArgs;
- };
-
- MongoRunner = function() {};
-
- MongoRunner.dataDir = "/data/db";
- MongoRunner.dataPath = "/data/db/";
-
- MongoRunner.mongodPath = "mongod";
- MongoRunner.mongosPath = "mongos";
- MongoRunner.mongoShellPath = "mongo";
-
- MongoRunner.VersionSub = function(pattern, version) {
- this.pattern = pattern;
- this.version = version;
- };
-
- /**
- * Returns an array of version elements from a version string.
- *
- * "3.3.4-fade3783" -> ["3", "3", "4-fade3783" ]
- * "3.2" -> [ "3", "2" ]
- * 3 -> exception: versions must have at least two components.
- */
- var convertVersionStringToArray = function(versionString) {
- assert("" !== versionString, "Version strings must not be empty");
- var versionArray = versionString.split('.');
-
- assert.gt(versionArray.length,
- 1,
- "MongoDB versions must have at least two components to compare, but \"" +
- versionString + "\" has " + versionArray.length);
- return versionArray;
- };
-
- /**
- * Returns the major version string from a version string.
- *
- * 3.3.4-fade3783 -> 3.3
- * 3.2 -> 3.2
- * 3 -> exception: versions must have at least two components.
- */
- var extractMajorVersionFromVersionString = function(versionString) {
- return convertVersionStringToArray(versionString).slice(0, 2).join('.');
- };
-
- // These patterns allow substituting the binary versions used for each version string to support
- // the
- // dev/stable MongoDB release cycle.
- //
- // If you add a new version substitution to this list, you should add it to the lists of
- // versions being checked in 'verify_versions_test.js' to verify it is susbstituted correctly.
- MongoRunner.binVersionSubs = [
- new MongoRunner.VersionSub("latest", shellVersion()),
- new MongoRunner.VersionSub(extractMajorVersionFromVersionString(shellVersion()),
- shellVersion()),
- // To-be-updated when we branch for the next release.
- new MongoRunner.VersionSub("last-stable", "4.2")
- ];
-
- MongoRunner.getBinVersionFor = function(version) {
- if (version instanceof MongoRunner.versionIterator.iterator) {
- version = version.current();
- }
-
- if (version == null)
- version = "";
- version = version.trim();
- if (version === "")
- version = "latest";
-
- // See if this version is affected by version substitutions
- for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
- var sub = MongoRunner.binVersionSubs[i];
- if (sub.pattern == version) {
- return sub.version;
- }
- }
-
- return version;
- };
-
- /**
- * Returns true if two version strings could represent the same version. This is true
- * if, after passing the versions through getBinVersionFor, the versions have the
- * same value for each version component up through the length of the shorter version.
- *
- * That is, 3.2.4 compares equal to 3.2, but 3.2.4 does not compare equal to 3.2.3.
- */
- MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ return fullArgs;
+};
+
+MongoRunner = function() {};
+
+MongoRunner.dataDir = "/data/db";
+MongoRunner.dataPath = "/data/db/";
+
+MongoRunner.mongodPath = "mongod";
+MongoRunner.mongosPath = "mongos";
+MongoRunner.mongoShellPath = "mongo";
+
+MongoRunner.VersionSub = function(pattern, version) {
+ this.pattern = pattern;
+ this.version = version;
+};
+
+/**
+ * Returns an array of version elements from a version string.
+ *
+ * "3.3.4-fade3783" -> ["3", "3", "4-fade3783" ]
+ * "3.2" -> [ "3", "2" ]
+ * 3 -> exception: versions must have at least two components.
+ */
+var convertVersionStringToArray = function(versionString) {
+ assert("" !== versionString, "Version strings must not be empty");
+ var versionArray = versionString.split('.');
+
+ assert.gt(versionArray.length,
+ 1,
+ "MongoDB versions must have at least two components to compare, but \"" +
+ versionString + "\" has " + versionArray.length);
+ return versionArray;
+};
+
+/**
+ * Returns the major version string from a version string.
+ *
+ * 3.3.4-fade3783 -> 3.3
+ * 3.2 -> 3.2
+ * 3 -> exception: versions must have at least two components.
+ */
+var extractMajorVersionFromVersionString = function(versionString) {
+ return convertVersionStringToArray(versionString).slice(0, 2).join('.');
+};
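Worked examples for the two internal helpers, taken from their doc comments:

convertVersionStringToArray("3.3.4-fade3783");           // ["3", "3", "4-fade3783"]
extractMajorVersionFromVersionString("3.3.4-fade3783");  // "3.3"
extractMajorVersionFromVersionString("3.2");             // "3.2"
// A single-component string such as "3" trips the assert.gt above and throws.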
+
+// These patterns allow substituting the binary versions used for each version string to
+// support the dev/stable MongoDB release cycle.
+//
+// If you add a new version substitution to this list, you should add it to the lists of
+// versions being checked in 'verify_versions_test.js' to verify it is substituted correctly.
+MongoRunner.binVersionSubs = [
+ new MongoRunner.VersionSub("latest", shellVersion()),
+ new MongoRunner.VersionSub(extractMajorVersionFromVersionString(shellVersion()),
+ shellVersion()),
+ // To-be-updated when we branch for the next release.
+ new MongoRunner.VersionSub("last-stable", "4.2")
+];
+
+MongoRunner.getBinVersionFor = function(version) {
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.current();
+ }
- // Check for invalid version strings first.
- convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
- convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
+ if (version == null)
+ version = "";
+ version = version.trim();
+ if (version === "")
+ version = "latest";
- try {
- return (0 === MongoRunner.compareBinVersions(versionA, versionB));
- } catch (err) {
- // compareBinVersions() throws an error if two versions differ only by the git hash.
- return false;
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.pattern == version) {
+ return sub.version;
}
- };
-
- /**
- * Compares two version strings and returns:
- * 1, if the first is more recent
- * 0, if they are equal
- * -1, if the first is older
- *
- * Note that this function only compares up to the length of the shorter version.
- * Because of this, minor versions will compare equal to the major versions they stem
- * from, but major-major and minor-minor version pairs will undergo strict comparison.
- */
- MongoRunner.compareBinVersions = function(versionA, versionB) {
-
- let stringA = versionA;
- let stringB = versionB;
-
- versionA = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
- versionB = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
-
- // Treat the githash as a separate element, if it's present.
- versionA.push(...versionA.pop().split("-"));
- versionB.push(...versionB.pop().split("-"));
+ }
- var elementsToCompare = Math.min(versionA.length, versionB.length);
+ return version;
+};
+
+/**
+ * Returns true if two version strings could represent the same version. This is true
+ * if, after passing the versions through getBinVersionFor, the versions have the
+ * same value for each version component up through the length of the shorter version.
+ *
+ * That is, 3.2.4 compares equal to 3.2, but 3.2.4 does not compare equal to 3.2.3.
+ */
+MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ // Check for invalid version strings first.
+ convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
+ convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
+
+ try {
+ return (0 === MongoRunner.compareBinVersions(versionA, versionB));
+ } catch (err) {
+ // compareBinVersions() throws an error if two versions differ only by the git hash.
+ return false;
+ }
+};
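Restating the comparison rule above as concrete cases:

MongoRunner.areBinVersionsTheSame("3.2.4", "3.2");    // true: equal across the shorter length
MongoRunner.areBinVersionsTheSame("3.2.4", "3.2.3");  // false: third components differ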
- for (var i = 0; i < elementsToCompare; ++i) {
- var elementA = versionA[i];
- var elementB = versionB[i];
+/**
+ * Compares two version strings and returns:
+ * 1, if the first is more recent
+ * 0, if they are equal
+ * -1, if the first is older
+ *
+ * Note that this function only compares up to the length of the shorter version.
+ * Because of this, minor versions will compare equal to the major versions they stem
+ * from, but major-major and minor-minor version pairs will undergo strict comparison.
+ */
+MongoRunner.compareBinVersions = function(versionA, versionB) {
+ let stringA = versionA;
+ let stringB = versionB;
- if (elementA === elementB) {
- continue;
- }
+ versionA = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
+ versionB = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
- var numA = parseInt(elementA);
- var numB = parseInt(elementB);
+ // Treat the githash as a separate element, if it's present.
+ versionA.push(...versionA.pop().split("-"));
+ versionB.push(...versionB.pop().split("-"));
- assert(!isNaN(numA) && !isNaN(numB), "Cannot compare non-equal non-numeric versions.");
+ var elementsToCompare = Math.min(versionA.length, versionB.length);
- if (numA > numB) {
- return 1;
- } else if (numA < numB) {
- return -1;
- }
+ for (var i = 0; i < elementsToCompare; ++i) {
+ var elementA = versionA[i];
+ var elementB = versionB[i];
- assert(false, `Unreachable case. Provided versions: {${stringA}, ${stringB}}`);
+ if (elementA === elementB) {
+ continue;
}
- return 0;
- };
-
- MongoRunner.logicalOptions = {
- runId: true,
- env: true,
- pathOpts: true,
- remember: true,
- noRemember: true,
- appendOptions: true,
- restart: true,
- noCleanData: true,
- cleanData: true,
- startClean: true,
- forceLock: true,
- useLogFiles: true,
- logFile: true,
- useHostName: true,
- useHostname: true,
- noReplSet: true,
- forgetPort: true,
- arbiter: true,
- noJournal: true,
- binVersion: true,
- waitForConnect: true,
- bridgeOptions: true,
- skipValidation: true,
- };
+ var numA = parseInt(elementA);
+ var numB = parseInt(elementB);
- MongoRunner.toRealPath = function(path, pathOpts) {
+ assert(!isNaN(numA) && !isNaN(numB), "Cannot compare non-equal non-numeric versions.");
- // Replace all $pathOptions with actual values
- pathOpts = pathOpts || {};
- path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
- path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
- for (var key in pathOpts) {
- path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ if (numA > numB) {
+ return 1;
+ } else if (numA < numB) {
+ return -1;
}
- // Relative path
- // Detect Unix and Windows absolute paths
- // as well as Windows drive letters
- // Also captures Windows UNC paths
+ assert(false, `Unreachable case. Provided versions: {${stringA}, ${stringB}}`);
+ }
- if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
- if (path != "" && !path.endsWith("/"))
- path += "/";
+ return 0;
+};
+
+MongoRunner.logicalOptions = {
+ runId: true,
+ env: true,
+ pathOpts: true,
+ remember: true,
+ noRemember: true,
+ appendOptions: true,
+ restart: true,
+ noCleanData: true,
+ cleanData: true,
+ startClean: true,
+ forceLock: true,
+ useLogFiles: true,
+ logFile: true,
+ useHostName: true,
+ useHostname: true,
+ noReplSet: true,
+ forgetPort: true,
+ arbiter: true,
+ noJournal: true,
+ binVersion: true,
+ waitForConnect: true,
+ bridgeOptions: true,
+ skipValidation: true,
+};
+
+MongoRunner.toRealPath = function(path, pathOpts) {
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {};
+ path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
+ path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
+ for (var key in pathOpts) {
+ path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ }
- path = MongoRunner.dataPath + path;
- }
+ // Relative path
+ // Detect Unix and Windows absolute paths
+ // as well as Windows drive letters
+ // Also captures Windows UNC paths
- return path;
+ if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
+ if (path != "" && !path.endsWith("/"))
+ path += "/";
- };
+ path = MongoRunner.dataPath + path;
+ }
- MongoRunner.toRealDir = function(path, pathOpts) {
+ return path;
+};
- path = MongoRunner.toRealPath(path, pathOpts);
+MongoRunner.toRealDir = function(path, pathOpts) {
+ path = MongoRunner.toRealPath(path, pathOpts);
- if (path.endsWith("/"))
- path = path.substring(0, path.length - 1);
+ if (path.endsWith("/"))
+ path = path.substring(0, path.length - 1);
- return path;
- };
+ return path;
+};
- MongoRunner.toRealFile = MongoRunner.toRealDir;
+MongoRunner.toRealFile = MongoRunner.toRealDir;
- /**
- * Returns an iterator object which yields successive versions on calls to advance(), starting
- * from a random initial position, from an array of versions.
- *
- * If passed a single version string or an already-existing version iterator, just returns the
- * object itself, since it will yield correctly on calls to advance().
- *
- * @param {Array.<String>}|{String}|{versionIterator}
- */
- MongoRunner.versionIterator = function(arr, isRandom) {
+/**
+ * Returns an iterator object which yields successive versions on calls to advance(), starting
+ * from a random initial position, from an array of versions.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on calls to advance().
+ *
+ * @param {Array.<String>}|{String}|{versionIterator}
+ */
+MongoRunner.versionIterator = function(arr, isRandom) {
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if (typeof arr == "string")
+ return arr;
+ if (arr.isVersionIterator)
+ return arr;
- // If this isn't an array of versions, or is already an iterator, just use it
- if (typeof arr == "string")
- return arr;
- if (arr.isVersionIterator)
- return arr;
+ if (isRandom == undefined)
+ isRandom = false;
- if (isRandom == undefined)
- isRandom = false;
+ // Starting pos
+ var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
- // Starting pos
- var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
+ return new MongoRunner.versionIterator.iterator(i, arr);
+};
- return new MongoRunner.versionIterator.iterator(i, arr);
+MongoRunner.versionIterator.iterator = function(i, arr) {
+ if (!Array.isArray(arr)) {
+ throw new Error("Expected an array for the second argument, but got: " + tojson(arr));
+ }
+
+ this.current = function current() {
+ return arr[i];
};
- MongoRunner.versionIterator.iterator = function(i, arr) {
- if (!Array.isArray(arr)) {
- throw new Error("Expected an array for the second argument, but got: " + tojson(arr));
- }
+ // We define the toString() method as an alias for current() so that concatenating a version
+ // iterator with a string yields the current version in the list without introducing any
+ // side-effects.
+ this.toString = this.current;
- this.current = function current() {
- return arr[i];
- };
+ this.advance = function advance() {
+ i = (i + 1) % arr.length;
+ };
- // We define the toString() method as an alias for current() so that concatenating a version
- // iterator with a string returns the next version in the list without introducing any
- // side-effects.
- this.toString = this.current;
+ this.isVersionIterator = true;
+};
+
+/**
+ * Converts the args object by pairing all keys with their value and appending
+ * dash-dash (--) to the keys. The only exceptions to this rule are keys that
+ * are defined in MongoRunner.logicalOptions, which will be ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+MongoRunner.arrOptions = function(binaryName, args) {
+ var fullArgs = [""];
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if (isObject(args) || (args.length == 1 && isObject(args[0]))) {
+ var o = isObject(args) ? args : args[0];
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion != "" && o.binVersion != shellVersion()) {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function(option, value) {
+ if (!o.binVersion)
+ return true;
- this.advance = function advance() {
- i = (i + 1) % arr.length;
+ return true;
};
- this.isVersionIterator = true;
+ var addOptionsToFullArgs = function(k, v) {
+ if (v === undefined || v === null)
+ return;
- };
+ fullArgs.push("--" + k);
- /**
- * Converts the args object by pairing all keys with their value and appending
- * dash-dash (--) to the keys. The only exception to this rule are keys that
- * are defined in MongoRunner.logicalOptions, of which they will be ignored.
- *
- * @param {string} binaryName
- * @param {Object} args
- *
- * @return {Array.<String>} an array of parameter strings that can be passed
- * to the binary.
- */
- MongoRunner.arrOptions = function(binaryName, args) {
-
- var fullArgs = [""];
-
- // isObject returns true even if "args" is an array, so the else branch of this statement is
- // dead code. See SERVER-14220.
- if (isObject(args) || (args.length == 1 && isObject(args[0]))) {
- var o = isObject(args) ? args : args[0];
-
- // If we've specified a particular binary version, use that
- if (o.binVersion && o.binVersion != "" && o.binVersion != shellVersion()) {
- binaryName += "-" + o.binVersion;
+ if (v != "") {
+ fullArgs.push("" + v);
}
+ };
- // Manage legacy options
- var isValidOptionForBinary = function(option, value) {
-
- if (!o.binVersion)
- return true;
-
- return true;
- };
-
- var addOptionsToFullArgs = function(k, v) {
- if (v === undefined || v === null)
- return;
-
- fullArgs.push("--" + k);
-
- if (v != "") {
- fullArgs.push("" + v);
- }
- };
-
- for (var k in o) {
- // Make sure our logical option should be added to the array of options
- if (!o.hasOwnProperty(k) || k in MongoRunner.logicalOptions ||
- !isValidOptionForBinary(k, o[k]))
- continue;
+ for (var k in o) {
+ // Make sure our logical option should be added to the array of options
+ if (!o.hasOwnProperty(k) || k in MongoRunner.logicalOptions ||
+ !isValidOptionForBinary(k, o[k]))
+ continue;
- if ((k == "v" || k == "verbose") && isNumber(o[k])) {
- var n = o[k];
- if (n > 0) {
- if (n > 10)
- n = 10;
- var temp = "-";
- while (n-- > 0)
- temp += "v";
- fullArgs.push(temp);
- }
- } else if (k === "setParameter" && isObject(o[k])) {
- // If the value associated with the setParameter option is an object, we want
- // to add all key-value pairs in that object as separate --setParameters.
- Object.keys(o[k]).forEach(function(paramKey) {
- addOptionsToFullArgs(k, "" + paramKey + "=" + o[k][paramKey]);
- });
- } else {
- addOptionsToFullArgs(k, o[k]);
+ if ((k == "v" || k == "verbose") && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10)
+ n = 10;
+ var temp = "-";
+ while (n-- > 0)
+ temp += "v";
+ fullArgs.push(temp);
}
+ } else if (k === "setParameter" && isObject(o[k])) {
+ // If the value associated with the setParameter option is an object, we want
+ // to add all key-value pairs in that object as separate --setParameters.
+ Object.keys(o[k]).forEach(function(paramKey) {
+ addOptionsToFullArgs(k, "" + paramKey + "=" + o[k][paramKey]);
+ });
+ } else {
+ addOptionsToFullArgs(k, o[k]);
}
- } else {
- for (var i = 0; i < args.length; i++)
- fullArgs.push(args[i]);
}
+ } else {
+ for (var i = 0; i < args.length; i++)
+ fullArgs.push(args[i]);
+ }
- fullArgs[0] = binaryName;
- return fullArgs;
- };
-
- MongoRunner.arrToOpts = function(arr) {
+ fullArgs[0] = binaryName;
+ return fullArgs;
+};
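+
+// Illustrative example (not part of the original change): arrOptions turns an
+// options object into a flat argument array. Assuming none of the keys appear
+// in MongoRunner.logicalOptions,
+//   MongoRunner.arrOptions("mongod", {port: 20000, verbose: 2, setParameter: {x: 1}})
+// would yield ["mongod", "--port", "20000", "-vv", "--setParameter", "x=1"].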
- var opts = {};
- for (var i = 1; i < arr.length; i++) {
- if (arr[i].startsWith("-")) {
- var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
+MongoRunner.arrToOpts = function(arr) {
+ var opts = {};
+ for (var i = 1; i < arr.length; i++) {
+ if (arr[i].startsWith("-")) {
+ var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
- if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
- opts[opt] = arr[i + 1];
- i++;
- } else {
- opts[opt] = "";
- }
+ if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
+ opts[opt] = arr[i + 1];
+ i++;
+ } else {
+ opts[opt] = "";
+ }
- if (opt.replace(/v/g, "") == "") {
- opts["verbose"] = opt.length;
- }
+ if (opt.replace(/v/g, "") == "") {
+ opts["verbose"] = opt.length;
}
}
+ }
- return opts;
- };
+ return opts;
+};
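+
+// Illustrative example (not part of the original change): arrToOpts inverts
+// arrOptions for simple argument arrays, e.g.
+//   MongoRunner.arrToOpts(["mongod", "--port", "20000", "-vv"])
+// yields {port: "20000", vv: "", verbose: 2}; a bare run of "v"s is also
+// folded back into a numeric "verbose" level.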
- MongoRunner.savedOptions = {};
+MongoRunner.savedOptions = {};
- MongoRunner.mongoOptions = function(opts) {
- // Don't remember waitForConnect
- var waitForConnect = opts.waitForConnect;
- delete opts.waitForConnect;
+MongoRunner.mongoOptions = function(opts) {
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
- // If we're a mongo object
- if (opts.getDB) {
- opts = {restart: opts.runId};
- }
+ // If we're a mongo object
+ if (opts.getDB) {
+ opts = {restart: opts.runId};
+ }
- // Initialize and create a copy of the opts
- opts = Object.merge(opts || {}, {});
+ // Initialize and create a copy of the opts
+ opts = Object.merge(opts || {}, {});
- if (!opts.restart)
- opts.restart = false;
+ if (!opts.restart)
+ opts.restart = false;
- // RunId can come from a number of places
- // If restart is passed as an old connection
- if (opts.restart && opts.restart.getDB) {
- opts.runId = opts.restart.runId;
- opts.restart = true;
- }
- // If it's the runId itself
- else if (isObject(opts.restart)) {
- opts.runId = opts.restart;
- opts.restart = true;
- }
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if (opts.restart && opts.restart.getDB) {
+ opts.runId = opts.restart.runId;
+ opts.restart = true;
+ }
+ // If it's the runId itself
+ else if (isObject(opts.restart)) {
+ opts.runId = opts.restart;
+ opts.restart = true;
+ }
- if (isObject(opts.remember)) {
- opts.runId = opts.remember;
- opts.remember = true;
- } else if (opts.remember == undefined) {
- // Remember by default if we're restarting
- opts.remember = opts.restart;
- }
+ if (isObject(opts.remember)) {
+ opts.runId = opts.remember;
+ opts.remember = true;
+ } else if (opts.remember == undefined) {
+ // Remember by default if we're restarting
+ opts.remember = opts.restart;
+ }
- // If we passed in restart : <conn> or runId : <conn>
- if (isObject(opts.runId) && opts.runId.runId)
- opts.runId = opts.runId.runId;
+ // If we passed in restart : <conn> or runId : <conn>
+ if (isObject(opts.runId) && opts.runId.runId)
+ opts.runId = opts.runId.runId;
- if (opts.restart && opts.remember) {
- opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
- }
+ if (opts.restart && opts.remember) {
+ opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
+ }
- // Create a new runId
- opts.runId = opts.runId || ObjectId();
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId();
- if (opts.forgetPort) {
- delete opts.port;
- }
+ if (opts.forgetPort) {
+ delete opts.port;
+ }
- // Normalize and get the binary version to use
- if (opts.hasOwnProperty('binVersion')) {
- if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
- // Advance the version iterator so that subsequent calls to
- // MongoRunner.mongoOptions() use the next version in the list.
- const iterator = opts.binVersion;
- opts.binVersion = iterator.current();
- iterator.advance();
- }
- opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ // Normalize and get the binary version to use
+ if (opts.hasOwnProperty('binVersion')) {
+ if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
+ // Advance the version iterator so that subsequent calls to
+ // MongoRunner.mongoOptions() use the next version in the list.
+ const iterator = opts.binVersion;
+ opts.binVersion = iterator.current();
+ iterator.advance();
}
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ }
- // Default for waitForConnect is true
- opts.waitForConnect =
- (waitForConnect == undefined || waitForConnect == null) ? true : waitForConnect;
+ // Default for waitForConnect is true
+ opts.waitForConnect =
+ (waitForConnect == undefined || waitForConnect == null) ? true : waitForConnect;
- opts.port = opts.port || allocatePort();
+ opts.port = opts.port || allocatePort();
- opts.pathOpts =
- Object.merge(opts.pathOpts || {}, {port: "" + opts.port, runId: "" + opts.runId});
+ opts.pathOpts =
+ Object.merge(opts.pathOpts || {}, {port: "" + opts.port, runId: "" + opts.runId});
- var shouldRemember =
- (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
- if (shouldRemember) {
- MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
- }
+ var shouldRemember =
+ (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
+ if (shouldRemember) {
+ MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
+ }
- if (jsTestOptions().networkMessageCompressors) {
- opts.networkMessageCompressors = jsTestOptions().networkMessageCompressors;
- }
+ if (jsTestOptions().networkMessageCompressors) {
+ opts.networkMessageCompressors = jsTestOptions().networkMessageCompressors;
+ }
- if (!opts.hasOwnProperty('bind_ip')) {
- opts.bind_ip = "0.0.0.0";
- }
+ if (!opts.hasOwnProperty('bind_ip')) {
+ opts.bind_ip = "0.0.0.0";
+ }
- return opts;
- };
+ return opts;
+};
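+
+// Illustrative sketch (not part of the original change): called with an empty
+// object, mongoOptions fills in the common defaults, roughly
+//   MongoRunner.mongoOptions({})
+// -> {restart: false, remember: false, runId: ObjectId(...), waitForConnect: true,
+//     port: <allocated port>, bind_ip: "0.0.0.0", pathOpts: {port: ..., runId: ...}}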
- // Returns an array of integers representing the version provided.
- // Ex: "3.3.12" => [3, 3, 12]
- var _convertVersionToIntegerArray = function(version) {
- var versionParts =
- convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
- if (versionParts.length === 2) {
- versionParts.push(Infinity);
- }
- return versionParts;
- };
+// Returns an array of integers representing the version provided.
+// Ex: "3.3.12" => [3, 3, 12]
+var _convertVersionToIntegerArray = function(version) {
+ var versionParts =
+ convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
+ if (versionParts.length === 2) {
+ versionParts.push(Infinity);
+ }
+ return versionParts;
+};
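+
+// For example (illustrative, not part of the original change):
+//   _convertVersionToIntegerArray("3.3.12") -> [3, 3, 12]
+//   _convertVersionToIntegerArray("3.4")    -> [3, 4, Infinity]
+// so a two-part version compares as newer than any patch release in its series.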
- // Returns if version2 is equal to, or came after, version 1.
- var _isMongodVersionEqualOrAfter = function(version1, version2) {
- if (version2 === "latest") {
- return true;
- }
+// Returns true if version2 is equal to, or came after, version1.
+var _isMongodVersionEqualOrAfter = function(version1, version2) {
+ if (version2 === "latest") {
+ return true;
+ }
- var versionParts1 = _convertVersionToIntegerArray(version1);
- var versionParts2 = _convertVersionToIntegerArray(version2);
- if (versionParts2[0] > versionParts1[0] ||
- (versionParts2[0] === versionParts1[0] && versionParts2[1] > versionParts1[1]) ||
- (versionParts2[0] === versionParts1[0] && versionParts2[1] === versionParts1[1] &&
- versionParts2[2] >= versionParts1[2])) {
- return true;
- }
+ var versionParts1 = _convertVersionToIntegerArray(version1);
+ var versionParts2 = _convertVersionToIntegerArray(version2);
+ if (versionParts2[0] > versionParts1[0] ||
+ (versionParts2[0] === versionParts1[0] && versionParts2[1] > versionParts1[1]) ||
+ (versionParts2[0] === versionParts1[0] && versionParts2[1] === versionParts1[1] &&
+ versionParts2[2] >= versionParts1[2])) {
+ return true;
+ }
- return false;
- };
+ return false;
+};
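+
+// For example (illustrative, not part of the original change):
+//   _isMongodVersionEqualOrAfter("3.3.12", "3.4") -> true
+//   _isMongodVersionEqualOrAfter("3.4", "3.3.12") -> false
+//   _isMongodVersionEqualOrAfter("3.4", "latest") -> true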
+
+// Removes a setParameter option from mongods running a version that won't recognize it.
+var _removeSetParameterIfBeforeVersion = function(opts, parameterName, requiredVersion) {
+ var versionCompatible = (opts.binVersion === "" || opts.binVersion === undefined ||
+ _isMongodVersionEqualOrAfter(requiredVersion, opts.binVersion));
+ if (!versionCompatible && opts.setParameter && opts.setParameter[parameterName] != undefined) {
+ print("Removing '" + parameterName + "' setParameter with value " +
+ opts.setParameter[parameterName] +
+          " because it isn't compatible with mongod running version " + opts.binVersion);
+ delete opts.setParameter[parameterName];
+ }
+};
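+
+// Illustrative example (not part of the original change): for a binary older
+// than the required version the parameter is dropped, e.g.
+//   var opts = {binVersion: "3.2.1", setParameter: {writePeriodicNoops: true}};
+//   _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
+// leaves opts.setParameter without the writePeriodicNoops entry.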
+
+/**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+MongoRunner.mongodOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+
+ opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port", opts.pathOpts);
+
+ opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
+
+ _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "numInitialSyncAttempts", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "numInitialSyncConnectAttempts", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "migrationLockAcquisitionMaxWaitMS", "4.1.7");
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = opts.dbpath + "/mongod.log";
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
- // Removes a setParameter parameter from mongods running a version that won't recognize them.
- var _removeSetParameterIfBeforeVersion = function(opts, parameterName, requiredVersion) {
- var versionCompatible = (opts.binVersion === "" || opts.binVersion === undefined ||
- _isMongodVersionEqualOrAfter(requiredVersion, opts.binVersion));
- if (!versionCompatible && opts.setParameter &&
- opts.setParameter[parameterName] != undefined) {
- print("Removing '" + parameterName + "' setParameter with value " +
- opts.setParameter[parameterName] +
- " because it isn't compatibile with mongod running version " + opts.binVersion);
- delete opts.setParameter[parameterName];
- }
- };
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
- /**
- * @option {object} opts
- *
- * {
- * dbpath {string}
- * useLogFiles {boolean}: use with logFile option.
- * logFile {string}: path to the log file. If not specified and useLogFiles
- * is true, automatically creates a log file inside dbpath.
- * noJournal {boolean}
- * keyFile
- * replSet
- * oplogSize
- * }
- */
- MongoRunner.mongodOptions = function(opts) {
-
- opts = MongoRunner.mongoOptions(opts);
-
- opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port", opts.pathOpts);
-
- opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
-
- _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "numInitialSyncAttempts", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "numInitialSyncConnectAttempts", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "migrationLockAcquisitionMaxWaitMS", "4.1.7");
-
- if (!opts.logFile && opts.useLogFiles) {
- opts.logFile = opts.dbpath + "/mongod.log";
- } else if (opts.logFile) {
- opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
- }
+ if ((jsTestOptions().noJournal || opts.noJournal) && !('journal' in opts) &&
+ !('configsvr' in opts)) {
+ opts.nojournal = "";
+ }
- if (opts.logFile !== undefined) {
- opts.logpath = opts.logFile;
- }
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
- if ((jsTestOptions().noJournal || opts.noJournal) && !('journal' in opts) &&
- !('configsvr' in opts)) {
- opts.nojournal = "";
+ if (opts.hasOwnProperty("enableEncryption")) {
+ // opts.enableEncryption, if set, must be an empty string
+ if (opts.enableEncryption !== "") {
+ throw new Error("The enableEncryption option must be an empty string if it is " +
+ "specified");
}
-
- if (jsTestOptions().keyFile && !opts.keyFile) {
- opts.keyFile = jsTestOptions().keyFile;
+ } else if (jsTestOptions().enableEncryption !== undefined) {
+ if (jsTestOptions().enableEncryption !== "") {
+ throw new Error("The enableEncryption option must be an empty string if it is " +
+ "specified");
}
+ opts.enableEncryption = "";
+ }
- if (opts.hasOwnProperty("enableEncryption")) {
- // opts.enableEncryption, if set, must be an empty string
- if (opts.enableEncryption !== "") {
- throw new Error("The enableEncryption option must be an empty string if it is " +
- "specified");
- }
- } else if (jsTestOptions().enableEncryption !== undefined) {
- if (jsTestOptions().enableEncryption !== "") {
- throw new Error("The enableEncryption option must be an empty string if it is " +
- "specified");
- }
- opts.enableEncryption = "";
+ if (opts.hasOwnProperty("encryptionKeyFile")) {
+ // opts.encryptionKeyFile, if set, must be a string
+ if (typeof opts.encryptionKeyFile !== "string") {
+ throw new Error("The encryptionKeyFile option must be a string if it is specified");
}
-
- if (opts.hasOwnProperty("encryptionKeyFile")) {
- // opts.encryptionKeyFile, if set, must be a string
- if (typeof opts.encryptionKeyFile !== "string") {
- throw new Error("The encryptionKeyFile option must be a string if it is specified");
- }
- } else if (jsTestOptions().encryptionKeyFile !== undefined) {
- if (typeof(jsTestOptions().encryptionKeyFile) !== "string") {
- throw new Error("The encryptionKeyFile option must be a string if it is specified");
- }
- opts.encryptionKeyFile = jsTestOptions().encryptionKeyFile;
+ } else if (jsTestOptions().encryptionKeyFile !== undefined) {
+ if (typeof (jsTestOptions().encryptionKeyFile) !== "string") {
+ throw new Error("The encryptionKeyFile option must be a string if it is specified");
}
+ opts.encryptionKeyFile = jsTestOptions().encryptionKeyFile;
+ }
- if (opts.hasOwnProperty("auditDestination")) {
- // opts.auditDestination, if set, must be a string
- if (typeof opts.auditDestination !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- } else if (jsTestOptions().auditDestination !== undefined) {
- if (typeof(jsTestOptions().auditDestination) !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- opts.auditDestination = jsTestOptions().auditDestination;
+ if (opts.hasOwnProperty("auditDestination")) {
+ // opts.auditDestination, if set, must be a string
+ if (typeof opts.auditDestination !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
-
- if (opts.noReplSet)
- opts.replSet = null;
- if (opts.arbiter)
- opts.oplogSize = 1;
-
- return opts;
- };
-
- MongoRunner.mongosOptions = function(opts) {
- opts = MongoRunner.mongoOptions(opts);
-
- // Normalize configdb option to be host string if currently a host
- if (opts.configdb && opts.configdb.getDB) {
- opts.configdb = opts.configdb.host;
+ } else if (jsTestOptions().auditDestination !== undefined) {
+ if (typeof (jsTestOptions().auditDestination) !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
+ opts.auditDestination = jsTestOptions().auditDestination;
+ }
- opts.pathOpts =
- Object.merge(opts.pathOpts, {configdb: opts.configdb.replace(/:|\/|,/g, "-")});
+ if (opts.noReplSet)
+ opts.replSet = null;
+ if (opts.arbiter)
+ opts.oplogSize = 1;
- if (!opts.logFile && opts.useLogFiles) {
- opts.logFile =
- MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log", opts.pathOpts);
- } else if (opts.logFile) {
- opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
- }
+ return opts;
+};
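+
+// Illustrative sketch (not part of the original change): with useLogFiles set
+// and no explicit logFile, the log lands inside the dbpath, e.g.
+//   MongoRunner.mongodOptions({useLogFiles: true})
+// produces opts with dbpath "<dataDir>/mongod-<port>" and
+// logpath "<dbpath>/mongod.log".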
- if (opts.logFile !== undefined) {
- opts.logpath = opts.logFile;
- }
+MongoRunner.mongosOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
- var testOptions = jsTestOptions();
- if (testOptions.keyFile && !opts.keyFile) {
- opts.keyFile = testOptions.keyFile;
- }
+ // Normalize configdb option to be host string if currently a host
+ if (opts.configdb && opts.configdb.getDB) {
+ opts.configdb = opts.configdb.host;
+ }
- if (opts.hasOwnProperty("auditDestination")) {
- // opts.auditDestination, if set, must be a string
- if (typeof opts.auditDestination !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- } else if (testOptions.auditDestination !== undefined) {
- if (typeof(testOptions.auditDestination) !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- opts.auditDestination = testOptions.auditDestination;
- }
+ opts.pathOpts = Object.merge(opts.pathOpts, {configdb: opts.configdb.replace(/:|\/|,/g, "-")});
- if (!opts.hasOwnProperty('binVersion') && testOptions.mongosBinVersion) {
- opts.binVersion = MongoRunner.getBinVersionFor(testOptions.mongosBinVersion);
- }
-
- // If the mongos is being restarted with a newer version, make sure we remove any options
- // that no longer exist in the newer version.
- if (opts.restart && MongoRunner.areBinVersionsTheSame('latest', opts.binVersion)) {
- delete opts.noAutoSplit;
- }
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log", opts.pathOpts);
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
- return opts;
- };
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
- /**
- * Starts a mongod instance.
- *
- * @param {Object} opts
- *
- * {
- * useHostName {boolean}: Uses hostname of machine if true.
- * forceLock {boolean}: Deletes the lock file if set to true.
- * dbpath {string}: location of db files.
- * cleanData {boolean}: Removes all files in dbpath if true.
- * startClean {boolean}: same as cleanData.
- * noCleanData {boolean}: Do not clean files (cleanData takes priority).
- * binVersion {string}: version for binary (also see MongoRunner.binVersionSubs).
- *
- * @see MongoRunner.mongodOptions for other options
- * }
- *
- * @return {Mongo} connection object to the started mongod instance.
- *
- * @see MongoRunner.arrOptions
- */
- MongoRunner.runMongod = function(opts) {
-
- opts = opts || {};
- var env = undefined;
- var useHostName = true;
- var runId = null;
- var waitForConnect = true;
- var fullOptions = opts;
-
- if (isObject(opts)) {
- opts = MongoRunner.mongodOptions(opts);
- fullOptions = opts;
-
- if (opts.useHostName != undefined) {
- useHostName = opts.useHostName;
- } else if (opts.useHostname != undefined) {
- useHostName = opts.useHostname;
- } else {
- useHostName = true; // Default to true
- }
- env = opts.env;
- runId = opts.runId;
- waitForConnect = opts.waitForConnect;
-
- if (opts.forceLock)
- removeFile(opts.dbpath + "/mongod.lock");
- if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
- print("Resetting db path '" + opts.dbpath + "'");
- resetDbpath(opts.dbpath);
- }
+ var testOptions = jsTestOptions();
+ if (testOptions.keyFile && !opts.keyFile) {
+ opts.keyFile = testOptions.keyFile;
+ }
- var mongodProgram = MongoRunner.mongodPath;
- opts = MongoRunner.arrOptions(mongodProgram, opts);
+ if (opts.hasOwnProperty("auditDestination")) {
+ // opts.auditDestination, if set, must be a string
+ if (typeof opts.auditDestination !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
-
- var mongod = MongoRunner._startWithArgs(opts, env, waitForConnect);
- if (!mongod) {
- return null;
+ } else if (testOptions.auditDestination !== undefined) {
+ if (typeof (testOptions.auditDestination) !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
+ opts.auditDestination = testOptions.auditDestination;
+ }
- mongod.commandLine = MongoRunner.arrToOpts(opts);
- mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
- mongod.host = mongod.name;
- mongod.port = parseInt(mongod.commandLine.port);
- mongod.runId = runId || ObjectId();
- mongod.dbpath = fullOptions.dbpath;
- mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
- mongod.fullOptions = fullOptions;
+ if (!opts.hasOwnProperty('binVersion') && testOptions.mongosBinVersion) {
+ opts.binVersion = MongoRunner.getBinVersionFor(testOptions.mongosBinVersion);
+ }
- return mongod;
- };
+ // If the mongos is being restarted with a newer version, make sure we remove any options
+ // that no longer exist in the newer version.
+ if (opts.restart && MongoRunner.areBinVersionsTheSame('latest', opts.binVersion)) {
+ delete opts.noAutoSplit;
+ }
- MongoRunner.runMongos = function(opts) {
- opts = opts || {};
-
- var env = undefined;
- var useHostName = false;
- var runId = null;
- var waitForConnect = true;
- var fullOptions = opts;
-
- if (isObject(opts)) {
- opts = MongoRunner.mongosOptions(opts);
- fullOptions = opts;
-
- useHostName = opts.useHostName || opts.useHostname;
- runId = opts.runId;
- waitForConnect = opts.waitForConnect;
- env = opts.env;
- var mongosProgram = MongoRunner.mongosPath;
- opts = MongoRunner.arrOptions(mongosProgram, opts);
+ return opts;
+};
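+
+// Illustrative example (not part of the original change): the configdb string
+// is sanitized for use in file paths, e.g. a configdb of
+// "cs/host1:27019,host2:27019" becomes the pathOpts entry
+// "cs-host1-27019-host2-27019".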
+
+/**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true.
+ * forceLock {boolean}: Deletes the lock file if set to true.
+ * dbpath {string}: location of db files.
+ * cleanData {boolean}: Removes all files in dbpath if true.
+ * startClean {boolean}: same as cleanData.
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority).
+ * binVersion {string}: version for binary (also see MongoRunner.binVersionSubs).
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongod = function(opts) {
+ opts = opts || {};
+ var env = undefined;
+ var useHostName = true;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongodOptions(opts);
+ fullOptions = opts;
+
+ if (opts.useHostName != undefined) {
+ useHostName = opts.useHostName;
+ } else if (opts.useHostname != undefined) {
+ useHostName = opts.useHostname;
+ } else {
+ useHostName = true; // Default to true
}
+ env = opts.env;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
- var mongos = MongoRunner._startWithArgs(opts, env, waitForConnect);
- if (!mongos) {
- return null;
+ if (opts.forceLock)
+ removeFile(opts.dbpath + "/mongod.lock");
+ if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
+ print("Resetting db path '" + opts.dbpath + "'");
+ resetDbpath(opts.dbpath);
}
- mongos.commandLine = MongoRunner.arrToOpts(opts);
- mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
- mongos.host = mongos.name;
- mongos.port = parseInt(mongos.commandLine.port);
- mongos.runId = runId || ObjectId();
- mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
- mongos.fullOptions = fullOptions;
-
- return mongos;
- };
+ var mongodProgram = MongoRunner.mongodPath;
+ opts = MongoRunner.arrOptions(mongodProgram, opts);
+ }
- MongoRunner.StopError = function(returnCode) {
- this.name = "StopError";
- this.returnCode = returnCode;
- this.message = "MongoDB process stopped with exit code: " + this.returnCode;
- this.stack = this.toString() + "\n" + (new Error()).stack;
- };
+ var mongod = MongoRunner._startWithArgs(opts, env, waitForConnect);
+ if (!mongod) {
+ return null;
+ }
- MongoRunner.StopError.prototype = Object.create(Error.prototype);
- MongoRunner.StopError.prototype.constructor = MongoRunner.StopError;
-
- // Constants for exit codes of MongoDB processes
- MongoRunner.EXIT_ABORT = -6;
- MongoRunner.EXIT_CLEAN = 0;
- MongoRunner.EXIT_BADOPTIONS = 2;
- MongoRunner.EXIT_REPLICATION_ERROR = 3;
- MongoRunner.EXIT_NEED_UPGRADE = 4;
- MongoRunner.EXIT_SHARDING_ERROR = 5;
- // SIGKILL is translated to TerminateProcess() on Windows, which causes the program to
- // terminate with exit code 1.
- MongoRunner.EXIT_SIGKILL = _isWindows() ? 1 : -9;
- MongoRunner.EXIT_KILL = 12;
- MongoRunner.EXIT_ABRUPT = 14;
- MongoRunner.EXIT_NTSERVICE_ERROR = 20;
- MongoRunner.EXIT_JAVA = 21;
- MongoRunner.EXIT_OOM_MALLOC = 42;
- MongoRunner.EXIT_OOM_REALLOC = 43;
- MongoRunner.EXIT_FS = 45;
- MongoRunner.EXIT_CLOCK_SKEW = 47; // OpTime clock skew; deprecated
- MongoRunner.EXIT_NET_ERROR = 48;
- MongoRunner.EXIT_WINDOWS_SERVICE_STOP = 49;
- MongoRunner.EXIT_POSSIBLE_CORRUPTION = 60;
- MongoRunner.EXIT_NEED_DOWNGRADE = 62;
- MongoRunner.EXIT_UNCAUGHT = 100; // top level exception that wasn't caught
- MongoRunner.EXIT_TEST = 101;
-
- MongoRunner.validateCollectionsCallback = function(port) {};
-
- /**
- * Kills a mongod process.
- *
- * @param {Mongo} conn the connection object to the process to kill
- * @param {number} signal The signal number to use for killing
- * @param {Object} opts Additional options. Format:
- * {
- * auth: {
- * user {string}: admin user name
- * pwd {string}: admin password
- * },
- * skipValidation: <bool>,
- * allowedExitCode: <int>
- * }
- *
- * Note: The auth option is required in a authenticated mongod running in Windows since
- * it uses the shutdown command, which requires admin credentials.
- */
- MongoRunner.stopMongod = function(conn, signal, opts) {
- if (!conn.pid) {
- throw new Error("first arg must have a `pid` property; " +
- "it is usually the object returned from MongoRunner.runMongod/s");
- }
+ mongod.commandLine = MongoRunner.arrToOpts(opts);
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
+ mongod.host = mongod.name;
+ mongod.port = parseInt(mongod.commandLine.port);
+ mongod.runId = runId || ObjectId();
+ mongod.dbpath = fullOptions.dbpath;
+ mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
+ mongod.fullOptions = fullOptions;
+
+ return mongod;
+};
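+
+// Typical usage (illustrative, not part of the original change):
+//   var conn = MongoRunner.runMongod({});
+//   assert.neq(null, conn, "mongod failed to start");
+//   conn.getDB("test").coll.insert({x: 1});
+//   MongoRunner.stopMongod(conn);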
+
+MongoRunner.runMongos = function(opts) {
+ opts = opts || {};
+
+ var env = undefined;
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongosOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+ env = opts.env;
+ var mongosProgram = MongoRunner.mongosPath;
+ opts = MongoRunner.arrOptions(mongosProgram, opts);
+ }
- if (!conn.port) {
- throw new Error("first arg must have a `port` property; " +
- "it is usually the object returned from MongoRunner.runMongod/s");
- }
+ var mongos = MongoRunner._startWithArgs(opts, env, waitForConnect);
+ if (!mongos) {
+ return null;
+ }
- signal = parseInt(signal) || 15;
- opts = opts || {};
+ mongos.commandLine = MongoRunner.arrToOpts(opts);
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
+ mongos.host = mongos.name;
+ mongos.port = parseInt(mongos.commandLine.port);
+ mongos.runId = runId || ObjectId();
+ mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
+ mongos.fullOptions = fullOptions;
+
+ return mongos;
+};
+
+MongoRunner.StopError = function(returnCode) {
+ this.name = "StopError";
+ this.returnCode = returnCode;
+ this.message = "MongoDB process stopped with exit code: " + this.returnCode;
+ this.stack = this.toString() + "\n" + (new Error()).stack;
+};
+
+MongoRunner.StopError.prototype = Object.create(Error.prototype);
+MongoRunner.StopError.prototype.constructor = MongoRunner.StopError;
+
+// Constants for exit codes of MongoDB processes
+MongoRunner.EXIT_ABORT = -6;
+MongoRunner.EXIT_CLEAN = 0;
+MongoRunner.EXIT_BADOPTIONS = 2;
+MongoRunner.EXIT_REPLICATION_ERROR = 3;
+MongoRunner.EXIT_NEED_UPGRADE = 4;
+MongoRunner.EXIT_SHARDING_ERROR = 5;
+// SIGKILL is translated to TerminateProcess() on Windows, which causes the program to
+// terminate with exit code 1.
+MongoRunner.EXIT_SIGKILL = _isWindows() ? 1 : -9;
+MongoRunner.EXIT_KILL = 12;
+MongoRunner.EXIT_ABRUPT = 14;
+MongoRunner.EXIT_NTSERVICE_ERROR = 20;
+MongoRunner.EXIT_JAVA = 21;
+MongoRunner.EXIT_OOM_MALLOC = 42;
+MongoRunner.EXIT_OOM_REALLOC = 43;
+MongoRunner.EXIT_FS = 45;
+MongoRunner.EXIT_CLOCK_SKEW = 47; // OpTime clock skew; deprecated
+MongoRunner.EXIT_NET_ERROR = 48;
+MongoRunner.EXIT_WINDOWS_SERVICE_STOP = 49;
+MongoRunner.EXIT_POSSIBLE_CORRUPTION = 60;
+MongoRunner.EXIT_NEED_DOWNGRADE = 62;
+MongoRunner.EXIT_UNCAUGHT = 100; // top level exception that wasn't caught
+MongoRunner.EXIT_TEST = 101;
+
+MongoRunner.validateCollectionsCallback = function(port) {};
+
+/**
+ * Kills a mongod process.
+ *
+ * @param {Mongo} conn the connection object to the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * },
+ * skipValidation: <bool>,
+ * allowedExitCode: <int>
+ * }
+ *
+ * Note: The auth option is required for an authenticated mongod running on Windows, since
+ * it uses the shutdown command, which requires admin credentials.
+ */
+MongoRunner.stopMongod = function(conn, signal, opts) {
+ if (!conn.pid) {
+ throw new Error("first arg must have a `pid` property; " +
+ "it is usually the object returned from MongoRunner.runMongod/s");
+ }
- var allowedExitCode = MongoRunner.EXIT_CLEAN;
+ if (!conn.port) {
+ throw new Error("first arg must have a `port` property; " +
+ "it is usually the object returned from MongoRunner.runMongod/s");
+ }
- if (opts.allowedExitCode) {
- allowedExitCode = opts.allowedExitCode;
- }
+ signal = parseInt(signal) || 15;
+ opts = opts || {};
- var port = parseInt(conn.port);
+ var allowedExitCode = MongoRunner.EXIT_CLEAN;
- var pid = conn.pid;
- // If the return code is in the serverExitCodeMap, it means the server crashed on startup.
- // We just use the recorded return code instead of stopping the program.
- var returnCode;
- if (serverExitCodeMap.hasOwnProperty(port)) {
- returnCode = serverExitCodeMap[port];
- delete serverExitCodeMap[port];
- } else {
- // Invoke callback to validate collections and indexes before shutting down mongod.
- // We skip calling the callback function when the expected return code of
- // the mongod process is non-zero since it's likely the process has already exited.
+ if (opts.allowedExitCode) {
+ allowedExitCode = opts.allowedExitCode;
+ }
- var skipValidation = false;
- if (opts.skipValidation) {
- skipValidation = true;
- }
+ var port = parseInt(conn.port);
- if (allowedExitCode === MongoRunner.EXIT_CLEAN && !skipValidation) {
- MongoRunner.validateCollectionsCallback(port);
- }
+ var pid = conn.pid;
+ // If the return code is in the serverExitCodeMap, it means the server crashed on startup.
+ // We just use the recorded return code instead of stopping the program.
+ var returnCode;
+ if (serverExitCodeMap.hasOwnProperty(port)) {
+ returnCode = serverExitCodeMap[port];
+ delete serverExitCodeMap[port];
+ } else {
+ // Invoke callback to validate collections and indexes before shutting down mongod.
+ // We skip calling the callback function when the expected return code of
+ // the mongod process is non-zero since it's likely the process has already exited.
- returnCode = _stopMongoProgram(port, signal, opts);
+ var skipValidation = false;
+ if (opts.skipValidation) {
+ skipValidation = true;
}
- if (allowedExitCode !== returnCode) {
- throw new MongoRunner.StopError(returnCode);
- } else if (returnCode !== MongoRunner.EXIT_CLEAN) {
- print("MongoDB process on port " + port + " intentionally exited with error code ",
- returnCode);
+
+ if (allowedExitCode === MongoRunner.EXIT_CLEAN && !skipValidation) {
+ MongoRunner.validateCollectionsCallback(port);
}
- return returnCode;
- };
+ returnCode = _stopMongoProgram(port, signal, opts);
+ }
+ if (allowedExitCode !== returnCode) {
+ throw new MongoRunner.StopError(returnCode);
+ } else if (returnCode !== MongoRunner.EXIT_CLEAN) {
+ print("MongoDB process on port " + port + " intentionally exited with error code ",
+ returnCode);
+ }
- MongoRunner.stopMongos = MongoRunner.stopMongod;
-
- /**
- * Starts an instance of the specified mongo tool
- *
- * @param {String} binaryName - The name of the tool to run.
- * @param {Object} [opts={}] - Options of the form --flag or --key=value to pass to the tool.
- * @param {string} [opts.binVersion] - The version of the tool to run.
- *
- * @param {...string} positionalArgs - Positional arguments to pass to the tool after all
- * options have been specified. For example,
- * MongoRunner.runMongoTool("executable", {key: value}, arg1, arg2) would invoke
- * ./executable --key value arg1 arg2.
- *
- * @see MongoRunner.arrOptions
- */
- MongoRunner.runMongoTool = function(binaryName, opts, ...positionalArgs) {
-
- var opts = opts || {};
-
- // Normalize and get the binary version to use
- if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
- // Advance the version iterator so that subsequent calls to MongoRunner.runMongoTool()
- // use the next version in the list.
- const iterator = opts.binVersion;
- opts.binVersion = iterator.current();
- iterator.advance();
- }
- opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ return returnCode;
+};
+
+MongoRunner.stopMongos = MongoRunner.stopMongod;
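+
+// Illustrative sketch (not part of the original change): callers expecting an
+// unclean shutdown can pass allowedExitCode, e.g.
+//   MongoRunner.stopMongod(conn, 9 /* SIGKILL */,
+//                          {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// any other exit code raises MongoRunner.StopError with the code attached.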
+
+/**
+ * Starts an instance of the specified mongo tool.
+ *
+ * @param {String} binaryName - The name of the tool to run.
+ * @param {Object} [opts={}] - Options of the form --flag or --key=value to pass to the tool.
+ * @param {string} [opts.binVersion] - The version of the tool to run.
+ *
+ * @param {...string} positionalArgs - Positional arguments to pass to the tool after all
+ * options have been specified. For example,
+ * MongoRunner.runMongoTool("executable", {key: value}, arg1, arg2) would invoke
+ * ./executable --key value arg1 arg2.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongoTool = function(binaryName, opts, ...positionalArgs) {
+ var opts = opts || {};
+
+ // Normalize and get the binary version to use
+ if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
+ // Advance the version iterator so that subsequent calls to MongoRunner.runMongoTool()
+ // use the next version in the list.
+ const iterator = opts.binVersion;
+ opts.binVersion = iterator.current();
+ iterator.advance();
+ }
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
- // Recent versions of the mongo tools support a --dialTimeout flag to set for how
- // long they retry connecting to a mongod or mongos process. We have them retry
- // connecting for up to 30 seconds to handle when the tests are run on a
- // resource-constrained host machine.
- //
- // The bsondump tool doesn't accept the --dialTimeout flag because it doesn't connect to a
- // mongod or mongos process.
- if (!opts.hasOwnProperty('dialTimeout') && binaryName !== 'bsondump' &&
- _toolVersionSupportsDialTimeout(opts.binVersion)) {
- opts['dialTimeout'] = '30';
- }
+ // Recent versions of the mongo tools support a --dialTimeout flag to set for how
+ // long they retry connecting to a mongod or mongos process. We have them retry
+ // connecting for up to 30 seconds to handle when the tests are run on a
+ // resource-constrained host machine.
+ //
+ // The bsondump tool doesn't accept the --dialTimeout flag because it doesn't connect to a
+ // mongod or mongos process.
+ if (!opts.hasOwnProperty('dialTimeout') && binaryName !== 'bsondump' &&
+ _toolVersionSupportsDialTimeout(opts.binVersion)) {
+ opts['dialTimeout'] = '30';
+ }
- // Convert 'opts' into an array of arguments.
- var argsArray = MongoRunner.arrOptions(binaryName, opts);
+ // Convert 'opts' into an array of arguments.
+ var argsArray = MongoRunner.arrOptions(binaryName, opts);
- // Append any positional arguments that were specified.
- argsArray.push(...positionalArgs);
+ // Append any positional arguments that were specified.
+ argsArray.push(...positionalArgs);
- return runMongoProgram.apply(null, argsArray);
+ return runMongoProgram.apply(null, argsArray);
+};
- };
+var _toolVersionSupportsDialTimeout = function(version) {
+ if (version === "latest" || version === "") {
+ return true;
+ }
+ var versionParts =
+ convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
+ if (versionParts.length === 2) {
+ versionParts.push(Infinity);
+ }
- var _toolVersionSupportsDialTimeout = function(version) {
- if (version === "latest" || version === "") {
- return true;
- }
- var versionParts =
- convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
- if (versionParts.length === 2) {
- versionParts.push(Infinity);
- }
+ if (versionParts[0] > 3 || (versionParts[0] === 3 && versionParts[1] > 3)) {
+ // The --dialTimeout command line option is supported by the tools
+ // with a major version newer than 3.3.
+ return true;
+ }
- if (versionParts[0] > 3 || (versionParts[0] === 3 && versionParts[1] > 3)) {
- // The --dialTimeout command line option is supported by the tools
- // with a major version newer than 3.3.
+ for (var supportedVersion of ["3.3.4", "3.2.5", "3.0.12"]) {
+ var supportedVersionParts = convertVersionStringToArray(supportedVersion)
+ .slice(0, 3)
+ .map(part => parseInt(part, 10));
+ if (versionParts[0] === supportedVersionParts[0] &&
+ versionParts[1] === supportedVersionParts[1] &&
+ versionParts[2] >= supportedVersionParts[2]) {
return true;
}
+ }
+ return false;
+};
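+
+// For example (illustrative, not part of the original change):
+//   _toolVersionSupportsDialTimeout("3.4.0") -> true  (newer than the 3.3 series)
+//   _toolVersionSupportsDialTimeout("3.2.5") -> true  (backported support)
+//   _toolVersionSupportsDialTimeout("3.2.4") -> false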
+
+// Given a test name figures out a directory for that test to use for dump files and makes sure
+// that directory exists and is empty.
+MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+};
+
+// Start a mongod instance and return a 'Mongo' object connected to it.
+// This function's arguments are passed as command line arguments to mongod.
+// The specified 'dbpath' is cleared if it exists, created if not.
+// var conn = _startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+var _startMongodEmpty = function() {
+ var args = createMongoArgs("mongod", Array.from(arguments));
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+};
+
+_startMongod = function() {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return _startMongodEmpty.apply(null, arguments);
+};
+
+/**
+ * Returns a new argArray with any test-specific arguments added.
+ */
+function appendSetParameterArgs(argArray) {
+ function argArrayContains(key) {
+ return (argArray
+ .filter((val) => {
+ return typeof val === "string" && val.indexOf(key) === 0;
+ })
+ .length > 0);
+ }
- for (var supportedVersion of["3.3.4", "3.2.5", "3.0.12"]) {
- var supportedVersionParts = convertVersionStringToArray(supportedVersion)
- .slice(0, 3)
- .map(part => parseInt(part, 10));
- if (versionParts[0] === supportedVersionParts[0] &&
- versionParts[1] === supportedVersionParts[1] &&
- versionParts[2] >= supportedVersionParts[2]) {
- return true;
- }
- }
- return false;
- };
-
- // Given a test name figures out a directory for that test to use for dump files and makes sure
- // that directory exists and is empty.
- MongoRunner.getAndPrepareDumpDirectory = function(testName) {
- var dir = MongoRunner.dataPath + testName + "_external/";
- resetDbpath(dir);
- return dir;
- };
-
- // Start a mongod instance and return a 'Mongo' object connected to it.
- // This function's arguments are passed as command line arguments to mongod.
- // The specified 'dbpath' is cleared if it exists, created if not.
- // var conn = _startMongodEmpty("--port", 30000, "--dbpath", "asdf");
- var _startMongodEmpty = function() {
- var args = createMongoArgs("mongod", Array.from(arguments));
-
- var dbpath = _parsePath.apply(null, args);
- resetDbpath(dbpath);
+ function argArrayContainsSetParameterValue(value) {
+ assert(value.endsWith("="), "Expected value argument to be of the form <parameterName>=");
+ return argArray.some(function(el) {
+ return typeof el === "string" && el.startsWith(value);
+ });
+ }
- return startMongoProgram.apply(null, args);
- };
+ // programName includes the version, e.g., mongod-3.2.
+ // baseProgramName is the program name without any version information, e.g., mongod.
+ let programName = argArray[0];
- _startMongod = function() {
- print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
- return _startMongodEmpty.apply(null, arguments);
- };
+ let [baseProgramName, programVersion] = programName.split("-");
+ let programMajorMinorVersion = 0;
+ if (programVersion) {
+ let [major, minor, point] = programVersion.split(".");
+ programMajorMinorVersion = parseInt(major) * 100 + parseInt(minor);
+ }
- /**
- * Returns a new argArray with any test-specific arguments added.
- */
- function appendSetParameterArgs(argArray) {
- function argArrayContains(key) {
- return (argArray
- .filter((val) => {
- return typeof val === "string" && val.indexOf(key) === 0;
- })
- .length > 0);
+ if (baseProgramName === 'mongod' || baseProgramName === 'mongos') {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push(...['--setParameter', "enableTestCommands=1"]);
}
-
- function argArrayContainsSetParameterValue(value) {
- assert(value.endsWith("="),
- "Expected value argument to be of the form <parameterName>=");
- return argArray.some(function(el) {
- return typeof el === "string" && el.startsWith(value);
- });
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
+ if (!argArrayContainsSetParameterValue('authenticationMechanisms=')) {
+ argArray.push(...['--setParameter',
+ "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ }
}
-
- // programName includes the version, e.g., mongod-3.2.
- // baseProgramName is the program name without any version information, e.g., mongod.
- let programName = argArray[0];
-
- let [baseProgramName, programVersion] = programName.split("-");
- let programMajorMinorVersion = 0;
- if (programVersion) {
- let [major, minor, point] = programVersion.split(".");
- programMajorMinorVersion = parseInt(major) * 100 + parseInt(minor);
+ if (jsTest.options().auth) {
+ argArray.push(...['--setParameter', "enableLocalhostAuthBypass=false"]);
}
- if (baseProgramName === 'mongod' || baseProgramName === 'mongos') {
- if (jsTest.options().enableTestCommands) {
- argArray.push(...['--setParameter', "enableTestCommands=1"]);
- }
- if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
- if (!argArrayContainsSetParameterValue('authenticationMechanisms=')) {
- argArray.push(
- ...['--setParameter',
- "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ // New options in 3.5.x
+ if (!programMajorMinorVersion || programMajorMinorVersion >= 305) {
+ if (jsTest.options().serviceExecutor) {
+ if (!argArrayContains("--serviceExecutor")) {
+ argArray.push(...["--serviceExecutor", jsTest.options().serviceExecutor]);
}
}
- if (jsTest.options().auth) {
- argArray.push(...['--setParameter', "enableLocalhostAuthBypass=false"]);
+
+ if (jsTest.options().transportLayer) {
+ if (!argArrayContains("--transportLayer")) {
+ argArray.push(...["--transportLayer", jsTest.options().transportLayer]);
+ }
}
- // New options in 3.5.x
- if (!programMajorMinorVersion || programMajorMinorVersion >= 305) {
- if (jsTest.options().serviceExecutor) {
- if (!argArrayContains("--serviceExecutor")) {
- argArray.push(...["--serviceExecutor", jsTest.options().serviceExecutor]);
+ // Disable background cache refreshing to avoid races in tests
+ argArray.push(...['--setParameter', "disableLogicalSessionCacheRefresh=true"]);
+ }
+
+ // Since options may not be backward compatible, mongos options are not
+ // set on older versions, e.g., mongos-3.0.
+ if (programName.endsWith('mongos')) {
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ let params = jsTest.options().setParametersMongos;
+ for (let paramName of Object.keys(params)) {
+ // Only set the 'logComponentVerbosity' parameter if it has not already
+ // been specified in the given argument array. This means that any
+ // 'logComponentVerbosity' settings passed through via TestData will
+ // always be overridden by settings passed directly to MongoRunner from
+ // within the shell.
+ if (paramName === "logComponentVerbosity" &&
+ argArrayContains("logComponentVerbosity")) {
+ continue;
}
+ const paramVal = ((param) => {
+ if (typeof param === "object") {
+ return JSON.stringify(param);
+ }
+
+ return param;
+ })(params[paramName]);
+ const setParamStr = paramName + "=" + paramVal;
+ argArray.push(...['--setParameter', setParamStr]);
+ }
+ }
+ } else if (baseProgramName === 'mongod') {
+ if (jsTestOptions().roleGraphInvalidationIsFatal) {
+ argArray.push(...['--setParameter', "roleGraphInvalidationIsFatal=true"]);
+ }
+
+ // Set storageEngine for mongod. There was no storageEngine parameter before 3.0.
+ if (jsTest.options().storageEngine &&
+ (!programVersion || programMajorMinorVersion >= 300)) {
+ if (!argArrayContains("--storageEngine")) {
+ argArray.push(...['--storageEngine', jsTest.options().storageEngine]);
}
+ }
- if (jsTest.options().transportLayer) {
- if (!argArrayContains("--transportLayer")) {
- argArray.push(...["--transportLayer", jsTest.options().transportLayer]);
+ // New mongod-specific options in 4.0.x
+ if (!programMajorMinorVersion || programMajorMinorVersion >= 400) {
+ if (jsTest.options().transactionLifetimeLimitSeconds !== undefined) {
+ if (!argArrayContainsSetParameterValue("transactionLifetimeLimitSeconds=")) {
+ argArray.push(...["--setParameter",
+ "transactionLifetimeLimitSeconds=" +
+ jsTest.options().transactionLifetimeLimitSeconds]);
}
}
+ }
- // Disable background cache refreshing to avoid races in tests
- argArray.push(...['--setParameter', "disableLogicalSessionCacheRefresh=true"]);
+ // TODO: Make this unconditional in 3.8.
+ if (!programMajorMinorVersion || programMajorMinorVersion > 304) {
+ if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) {
+ argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']);
+ }
}
- // Since options may not be backward compatible, mongos options are not
- // set on older versions, e.g., mongos-3.0.
- if (programName.endsWith('mongos')) {
- // apply setParameters for mongos
- if (jsTest.options().setParametersMongos) {
- let params = jsTest.options().setParametersMongos;
+ // Since options may not be backward compatible, mongod options are not
+ // set on older versions, e.g., mongod-3.0.
+ if (programName.endsWith('mongod')) {
+ if (jsTest.options().storageEngine === "wiredTiger" ||
+ !jsTest.options().storageEngine) {
+ if (jsTest.options().enableMajorityReadConcern !== undefined &&
+ !argArrayContains("--enableMajorityReadConcern")) {
+ argArray.push(...['--enableMajorityReadConcern',
+ jsTest.options().enableMajorityReadConcern.toString()]);
+ }
+ if (jsTest.options().storageEngineCacheSizeGB &&
+ !argArrayContains('--wiredTigerCacheSizeGB')) {
+ argArray.push(...['--wiredTigerCacheSizeGB',
+ jsTest.options().storageEngineCacheSizeGB]);
+ }
+ if (jsTest.options().wiredTigerEngineConfigString &&
+ !argArrayContains('--wiredTigerEngineConfigString')) {
+ argArray.push(...['--wiredTigerEngineConfigString',
+ jsTest.options().wiredTigerEngineConfigString]);
+ }
+ if (jsTest.options().wiredTigerCollectionConfigString &&
+ !argArrayContains('--wiredTigerCollectionConfigString')) {
+ argArray.push(...['--wiredTigerCollectionConfigString',
+ jsTest.options().wiredTigerCollectionConfigString]);
+ }
+ if (jsTest.options().wiredTigerIndexConfigString &&
+ !argArrayContains('--wiredTigerIndexConfigString')) {
+ argArray.push(...['--wiredTigerIndexConfigString',
+ jsTest.options().wiredTigerIndexConfigString]);
+ }
+ } else if (jsTest.options().storageEngine === "rocksdb") {
+ if (jsTest.options().storageEngineCacheSizeGB) {
+ argArray.push(
+ ...['--rocksdbCacheSizeGB', jsTest.options().storageEngineCacheSizeGB]);
+ }
+ } else if (jsTest.options().storageEngine === "inMemory") {
+ if (jsTest.options().storageEngineCacheSizeGB &&
+ !argArrayContains("--inMemorySizeGB")) {
+ argArray.push(
+ ...["--inMemorySizeGB", jsTest.options().storageEngineCacheSizeGB]);
+ }
+ }
+ // apply setParameters for mongod. The 'setParameters' field should be given as
+ // a plain JavaScript object, where each key is a parameter name and the value
+ // is the value to set for that parameter.
+ if (jsTest.options().setParameters) {
+ let params = jsTest.options().setParameters;
for (let paramName of Object.keys(params)) {
// Only set the 'logComponentVerbosity' parameter if it has not already
// been specified in the given argument array. This means that any
@@ -1118,6 +1198,7 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
argArrayContains("logComponentVerbosity")) {
continue;
}
+
const paramVal = ((param) => {
if (typeof param === "object") {
return JSON.stringify(param);
@@ -1129,261 +1210,157 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
argArray.push(...['--setParameter', setParamStr]);
}
}
- } else if (baseProgramName === 'mongod') {
- if (jsTestOptions().roleGraphInvalidationIsFatal) {
- argArray.push(...['--setParameter', "roleGraphInvalidationIsFatal=true"]);
- }
-
- // Set storageEngine for mongod. There was no storageEngine parameter before 3.0.
- if (jsTest.options().storageEngine &&
- (!programVersion || programMajorMinorVersion >= 300)) {
- if (!argArrayContains("--storageEngine")) {
- argArray.push(...['--storageEngine', jsTest.options().storageEngine]);
- }
- }
-
- // New mongod-specific options in 4.0.x
- if (!programMajorMinorVersion || programMajorMinorVersion >= 400) {
- if (jsTest.options().transactionLifetimeLimitSeconds !== undefined) {
- if (!argArrayContainsSetParameterValue(
- "transactionLifetimeLimitSeconds=")) {
- argArray.push(
- ...["--setParameter",
- "transactionLifetimeLimitSeconds=" +
- jsTest.options().transactionLifetimeLimitSeconds]);
- }
- }
- }
-
- // TODO: Make this unconditional in 3.8.
- if (!programMajorMinorVersion || programMajorMinorVersion > 304) {
- if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) {
- argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']);
- }
- }
-
- // Since options may not be backward compatible, mongod options are not
- // set on older versions, e.g., mongod-3.0.
- if (programName.endsWith('mongod')) {
- if (jsTest.options().storageEngine === "wiredTiger" ||
- !jsTest.options().storageEngine) {
- if (jsTest.options().enableMajorityReadConcern !== undefined &&
- !argArrayContains("--enableMajorityReadConcern")) {
- argArray.push(
- ...['--enableMajorityReadConcern',
- jsTest.options().enableMajorityReadConcern.toString()]);
- }
- if (jsTest.options().storageEngineCacheSizeGB &&
- !argArrayContains('--wiredTigerCacheSizeGB')) {
- argArray.push(...['--wiredTigerCacheSizeGB',
- jsTest.options().storageEngineCacheSizeGB]);
- }
- if (jsTest.options().wiredTigerEngineConfigString &&
- !argArrayContains('--wiredTigerEngineConfigString')) {
- argArray.push(...['--wiredTigerEngineConfigString',
- jsTest.options().wiredTigerEngineConfigString]);
- }
- if (jsTest.options().wiredTigerCollectionConfigString &&
- !argArrayContains('--wiredTigerCollectionConfigString')) {
- argArray.push(...['--wiredTigerCollectionConfigString',
- jsTest.options().wiredTigerCollectionConfigString]);
- }
- if (jsTest.options().wiredTigerIndexConfigString &&
- !argArrayContains('--wiredTigerIndexConfigString')) {
- argArray.push(...['--wiredTigerIndexConfigString',
- jsTest.options().wiredTigerIndexConfigString]);
- }
- } else if (jsTest.options().storageEngine === "rocksdb") {
- if (jsTest.options().storageEngineCacheSizeGB) {
- argArray.push(...['--rocksdbCacheSizeGB',
- jsTest.options().storageEngineCacheSizeGB]);
- }
- } else if (jsTest.options().storageEngine === "inMemory") {
- if (jsTest.options().storageEngineCacheSizeGB &&
- !argArrayContains("--inMemorySizeGB")) {
- argArray.push(
- ...["--inMemorySizeGB", jsTest.options().storageEngineCacheSizeGB]);
- }
- }
- // apply setParameters for mongod. The 'setParameters' field should be given as
- // a plain JavaScript object, where each key is a parameter name and the value
- // is the value to set for that parameter.
- if (jsTest.options().setParameters) {
- let params = jsTest.options().setParameters;
- for (let paramName of Object.keys(params)) {
- // Only set the 'logComponentVerbosity' parameter if it has not already
- // been specified in the given argument array. This means that any
- // 'logComponentVerbosity' settings passed through via TestData will
- // always be overridden by settings passed directly to MongoRunner from
- // within the shell.
- if (paramName === "logComponentVerbosity" &&
- argArrayContains("logComponentVerbosity")) {
- continue;
- }
-
- const paramVal = ((param) => {
- if (typeof param === "object") {
- return JSON.stringify(param);
- }
-
- return param;
- })(params[paramName]);
- const setParamStr = paramName + "=" + paramVal;
- argArray.push(...['--setParameter', setParamStr]);
- }
- }
- }
}
}
-
- return argArray;
}
- /**
- * Start a mongo process with a particular argument array.
- * If we aren't waiting for connect, return {pid: <pid>}.
- * If we are waiting for connect:
- * returns connection to process on success;
- * otherwise returns null if we fail to connect.
- */
- MongoRunner._startWithArgs = function(argArray, env, waitForConnect) {
- // TODO: Make there only be one codepath for starting mongo processes
-
- argArray = appendSetParameterArgs(argArray);
- var port = _parsePort.apply(null, argArray);
- var pid = -1;
- if (env === undefined) {
- pid = _startMongoProgram.apply(null, argArray);
- } else {
- pid = _startMongoProgram({args: argArray, env: env});
- }
+ return argArray;
+}
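+
+// Illustrative sketch (not part of the original change): for an unversioned
+// "mongod" with enableTestCommands on,
+//   appendSetParameterArgs(["mongod", "--port", "20000"])
+// returns the array extended with pairs such as
+// ["--setParameter", "enableTestCommands=1"],
+// ["--setParameter", "disableLogicalSessionCacheRefresh=true"] and
+// ["--setParameter", "orphanCleanupDelaySecs=1"], depending on jsTest.options().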
+
+/**
+ * Start a mongo process with a particular argument array.
+ * If we aren't waiting for connect, return {pid: <pid>}.
+ * If we are waiting for connect:
+ * returns connection to process on success;
+ * otherwise returns null if we fail to connect.
+ */
+MongoRunner._startWithArgs = function(argArray, env, waitForConnect) {
+ // TODO: Make there only be one codepath for starting mongo processes
+
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = -1;
+ if (env === undefined) {
+ pid = _startMongoProgram.apply(null, argArray);
+ } else {
+ pid = _startMongoProgram({args: argArray, env: env});
+ }
- delete serverExitCodeMap[port];
- if (!waitForConnect) {
- return {
- pid: pid,
- port: port,
- };
- }
+ delete serverExitCodeMap[port];
+ if (!waitForConnect) {
+ return {
+ pid: pid,
+ port: port,
+ };
+ }
- var conn = null;
- assert.soon(function() {
- try {
- conn = new Mongo("127.0.0.1:" + port);
- conn.pid = pid;
+ var conn = null;
+ assert.soon(function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ conn.pid = pid;
+ return true;
+ } catch (e) {
+ var res = checkProgram(pid);
+ if (!res.alive) {
+ print("Could not start mongo program at " + port +
+ ", process ended with exit code: " + res.exitCode);
+ serverExitCodeMap[port] = res.exitCode;
return true;
- } catch (e) {
- var res = checkProgram(pid);
- if (!res.alive) {
- print("Could not start mongo program at " + port +
- ", process ended with exit code: " + res.exitCode);
- serverExitCodeMap[port] = res.exitCode;
- return true;
- }
}
- return false;
- }, "unable to connect to mongo program on port " + port, 600 * 1000);
-
- return conn;
- };
-
- /**
- * DEPRECATED
- *
- * Start mongod or mongos and return a Mongo() object connected to there.
- * This function's first argument is "mongod" or "mongos" program name, \
- * and subsequent arguments to this function are passed as
- * command line arguments to the program.
- */
- startMongoProgram = function() {
- var port = _parsePort.apply(null, arguments);
-
- // Enable test commands.
- // TODO: Make this work better with multi-version testing so that we can support
- // enabling this on 2.4 when testing 2.6
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var pid = _startMongoProgram.apply(null, args);
-
- var m;
- assert.soon(function() {
- try {
- m = new Mongo("127.0.0.1:" + port);
- m.pid = pid;
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return conn;
+};
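
A usage sketch of the two return shapes (the port numbers are arbitrary examples):

// Fire-and-forget: no connection attempt, just pid and port.
const res = MongoRunner._startWithArgs(['mongod', '--port', '20000'], undefined, false);
printjson(res);  // {pid: <pid>, port: 20000}

// Waiting for connect: a live Mongo() connection, or null if startup failed.
const conn = MongoRunner._startWithArgs(['mongod', '--port', '20001'], undefined, true);
if (conn !== null) {
    print("connected, server pid " + conn.pid);
}
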
+
+/**
+ * DEPRECATED
+ *
+ * Start mongod or mongos and return a Mongo() object connected to it.
+ * This function's first argument is the "mongod" or "mongos" program name,
+ * and subsequent arguments to this function are passed as
+ * command line arguments to the program.
+ */
+startMongoProgram = function() {
+ var port = _parsePort.apply(null, arguments);
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply(null, args);
+
+ var m;
+ assert.soon(function() {
+ try {
+ m = new Mongo("127.0.0.1:" + port);
+ m.pid = pid;
+ return true;
+ } catch (e) {
+ var res = checkProgram(pid);
+ if (!res.alive) {
+ print("Could not start mongo program at " + port +
+ ", process ended with exit code: " + res.exitCode);
+ // Break out
+ m = null;
return true;
- } catch (e) {
- var res = checkProgram(pid);
- if (!res.alive) {
- print("Could not start mongo program at " + port +
- ", process ended with exit code: " + res.exitCode);
- // Break out
- m = null;
- return true;
- }
}
- return false;
- }, "unable to connect to mongo program on port " + port, 600 * 1000);
-
- return m;
- };
-
- runMongoProgram = function() {
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var progName = args[0];
-
- // The bsondump tool doesn't support these auth related command line flags.
- if (jsTestOptions().auth && progName != 'mongod' && progName != 'bsondump') {
- args = args.slice(1);
- args.unshift(progName,
- '-u',
- jsTestOptions().authUser,
- '-p',
- jsTestOptions().authPassword,
- '--authenticationDatabase=admin');
}
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return m;
+};
+
+runMongoProgram = function() {
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var progName = args[0];
+
+ // The bsondump tool doesn't support these auth related command line flags.
+ if (jsTestOptions().auth && progName != 'mongod' && progName != 'bsondump') {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u',
+ jsTestOptions().authUser,
+ '-p',
+ jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
- if (progName == 'mongo' && !_useWriteCommandsDefault()) {
- progName = args[0];
- args = args.slice(1);
- args.unshift(progName, '--useLegacyWriteOps');
- }
-
- return _runMongoProgram.apply(null, args);
- };
-
- // Start a mongo program instance. This function's first argument is the
- // program name, and subsequent arguments to this function are passed as
- // command line arguments to the program. Returns pid of the spawned program.
- startMongoProgramNoConnect = function() {
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var progName = args[0];
-
- if (jsTestOptions().auth) {
- args = args.slice(1);
- args.unshift(progName,
- '-u',
- jsTestOptions().authUser,
- '-p',
- jsTestOptions().authPassword,
- '--authenticationDatabase=admin');
- }
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
- if (progName == 'mongo' && !_useWriteCommandsDefault()) {
- args = args.slice(1);
- args.unshift(progName, '--useLegacyWriteOps');
- }
+ return _runMongoProgram.apply(null, args);
+};
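
When auth is enabled, the helper above splices the credentials in right after the program name; a sketch of the resulting argv (the user and password values are illustrative):

var args = ['mongoexport', '--db', 'test'];
var progName = args[0];
args = args.slice(1);
args.unshift(progName,
             '-u', 'testUser',
             '-p', 'testPassword',
             '--authenticationDatabase=admin');
// args: ['mongoexport', '-u', 'testUser', '-p', 'testPassword',
//        '--authenticationDatabase=admin', '--db', 'test']
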
+
+// Start a mongo program instance. This function's first argument is the
+// program name, and subsequent arguments to this function are passed as
+// command line arguments to the program. Returns pid of the spawned program.
+startMongoProgramNoConnect = function() {
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u',
+ jsTestOptions().authUser,
+ '-p',
+ jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
- return _startMongoProgram.apply(null, args);
- };
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
- myPort = function() {
- var m = db.getMongo();
- if (m.host.match(/:/))
- return m.host.match(/:(.*)/)[1];
- else
- return 27017;
- };
+ return _startMongoProgram.apply(null, args);
+};
+myPort = function() {
+ var m = db.getMongo();
+ if (m.host.match(/:/))
+ return m.host.match(/:(.*)/)[1];
+ else
+ return 27017;
+};
}());
diff --git a/src/mongo/shell/servers_misc.js b/src/mongo/shell/servers_misc.js
index 825bca3d689..4edda039549 100644
--- a/src/mongo/shell/servers_misc.js
+++ b/src/mongo/shell/servers_misc.js
@@ -78,28 +78,28 @@ var allocatePort;
var resetAllocatedPorts;
(function() {
- // Defer initializing these variables until the first call, as TestData attributes may be
- // initialized as part of the --eval argument (e.g. by resmoke.py), which will not be evaluated
- // until after this has loaded.
- var maxPort;
- var nextPort;
-
- allocatePort = function() {
- // The default port was chosen in an attempt to have a large number of unassigned ports that
- // are also outside the ephemeral port range.
- nextPort = nextPort || jsTestOptions().minPort || 20000;
- maxPort = maxPort || jsTestOptions().maxPort || Math.pow(2, 16) - 1;
-
- if (nextPort === maxPort) {
- throw new Error("Exceeded maximum port range in allocatePort()");
- }
- return nextPort++;
- };
+// Defer initializing these variables until the first call, as TestData attributes may be
+// initialized as part of the --eval argument (e.g. by resmoke.py), which will not be evaluated
+// until after this has loaded.
+var maxPort;
+var nextPort;
+
+allocatePort = function() {
+ // The default port was chosen in an attempt to have a large number of unassigned ports that
+ // are also outside the ephemeral port range.
+ nextPort = nextPort || jsTestOptions().minPort || 20000;
+ maxPort = maxPort || jsTestOptions().maxPort || Math.pow(2, 16) - 1;
+
+ if (nextPort === maxPort) {
+ throw new Error("Exceeded maximum port range in allocatePort()");
+ }
+ return nextPort++;
+};
- resetAllocatedPorts = function() {
- jsTest.log("Resetting the range of allocated ports");
- maxPort = nextPort = undefined;
- };
+resetAllocatedPorts = function() {
+ jsTest.log("Resetting the range of allocated ports");
+ maxPort = nextPort = undefined;
+};
})();
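
A short sketch of the allocator's behavior once TestData is available (20000 is the default minimum from the code above; the exact values depend on jsTestOptions()):

const p1 = allocatePort();  // lazily initializes nextPort, e.g. 20000
const p2 = allocatePort();  // ports are handed out sequentially: 20001
resetAllocatedPorts();      // clears maxPort/nextPort
const p3 = allocatePort();  // restarts from the configured minimum
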
/**
@@ -142,9 +142,9 @@ function startParallelShell(jsCode, port, noConnect) {
}
// Convert function into call-string
- if (typeof(jsCode) == "function") {
+ if (typeof (jsCode) == "function") {
jsCode = "(" + jsCode.toString() + ")();";
- } else if (typeof(jsCode) == "string") {
+ } else if (typeof (jsCode) == "string") {
}
// do nothing
else {
@@ -153,7 +153,7 @@ function startParallelShell(jsCode, port, noConnect) {
if (noConnect) {
args.push("--nodb");
- } else if (typeof(db) == "object") {
+ } else if (typeof (db) == "object") {
jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
}
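
The function-to-call-string conversion above simply stringifies the function and wraps it in an immediate invocation; a sketch:

const body = function() {
    print("running in the parallel shell");
};
const callString = "(" + body.toString() + ")();";
// callString: '(function() { print("running in the parallel shell"); })();'
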
diff --git a/src/mongo/shell/session.js b/src/mongo/shell/session.js
index 0313f46fc0e..0f6eb11323a 100644
--- a/src/mongo/shell/session.js
+++ b/src/mongo/shell/session.js
@@ -5,7 +5,11 @@
* https://github.com/mongodb/specifications/blob/master/source/sessions/driver-sessions.rst#abstract
*/
var {
- DriverSession, SessionOptions, _DummyDriverSession, _DelegatingDriverSession, _ServerSession,
+ DriverSession,
+ SessionOptions,
+ _DummyDriverSession,
+ _DelegatingDriverSession,
+ _ServerSession,
} = (function() {
"use strict";
@@ -416,10 +420,10 @@ var {
if (writeError !== undefined) {
if (jsTest.options().logRetryAttempts) {
- jsTest.log("Retrying " + cmdName +
- " due to retryable write error (code=" +
- writeError.code + "), subsequent retries remaining: " +
- numRetries);
+ jsTest.log(
+ "Retrying " + cmdName +
+ " due to retryable write error (code=" + writeError.code +
+ "), subsequent retries remaining: " + numRetries);
}
if (client.isReplicaSetConnection()) {
client._markNodeAsFailed(
@@ -1035,54 +1039,54 @@ var {
const DummyDriverSession =
makeDriverSessionConstructor( // Force clang-format to break this line.
{
- createServerSession: function createServerSession(client) {
- return {
- injectSessionId: function injectSessionId(cmdObj) {
- return cmdObj;
- },
-
- assignTransactionNumber: function assignTransactionNumber(cmdObj) {
- return cmdObj;
- },
-
- canRetryWrites: function canRetryWrites(cmdObj) {
- return false;
- },
-
- assignTxnInfo: function assignTxnInfo(cmdObj) {
- return cmdObj;
- },
-
- isTxnActive: function isTxnActive() {
- return false;
- },
-
- isFirstStatement: function isFirstStatement() {
- return false;
- },
-
- getTxnOptions: function getTxnOptions() {
- return {};
- },
-
- startTransaction: function startTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before starting a transaction.");
- },
-
- commitTransaction: function commitTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before committing a transaction.");
- },
-
- abortTransaction: function abortTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before aborting a transaction.");
- },
- };
- },
-
- endSession: function endSession(serverSession) {},
+ createServerSession: function createServerSession(client) {
+ return {
+ injectSessionId: function injectSessionId(cmdObj) {
+ return cmdObj;
+ },
+
+ assignTransactionNumber: function assignTransactionNumber(cmdObj) {
+ return cmdObj;
+ },
+
+ canRetryWrites: function canRetryWrites(cmdObj) {
+ return false;
+ },
+
+ assignTxnInfo: function assignTxnInfo(cmdObj) {
+ return cmdObj;
+ },
+
+ isTxnActive: function isTxnActive() {
+ return false;
+ },
+
+ isFirstStatement: function isFirstStatement() {
+ return false;
+ },
+
+ getTxnOptions: function getTxnOptions() {
+ return {};
+ },
+
+ startTransaction: function startTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before starting a transaction.");
+ },
+
+ commitTransaction: function commitTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before committing a transaction.");
+ },
+
+ abortTransaction: function abortTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before aborting a transaction.");
+ },
+ };
+ },
+
+ endSession: function endSession(serverSession) {},
},
{causalConsistency: false, retryWrites: false});
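
The dummy server session above passes commands through untouched and refuses all transaction entry points; a behavioral sketch, assuming serverSession came from the dummy createServerSession():

const cmdObj = {find: "coll"};
serverSession.injectSessionId(cmdObj);  // returns cmdObj unchanged
serverSession.canRetryWrites(cmdObj);   // false: retryable writes disabled
try {
    serverSession.startTransaction();   // throws: startSession() required first
} catch (e) {
    print(e.message);
}
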
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index f814a02cbf4..6008c769202 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -102,7 +102,6 @@
* configRS - If the config servers are a replset, this will contain the config ReplSetTest object
*/
var ShardingTest = function(params) {
-
if (!(this instanceof ShardingTest)) {
return new ShardingTest(params);
}
@@ -292,16 +291,16 @@ var ShardingTest = function(params) {
countDBsFound++;
printjson(db);
});
- throw Error("couldn't find dbname: " + dbname + " in config.databases. Total DBs: " +
- countDBsFound);
+ throw Error("couldn't find dbname: " + dbname +
+ " in config.databases. Total DBs: " + countDBsFound);
};
this.getNonPrimaries = function(dbname) {
var x = this.config.databases.findOne({_id: dbname});
if (!x) {
this.config.databases.find().forEach(printjson);
- throw Error("couldn't find dbname: " + dbname + " total: " +
- this.config.databases.count());
+ throw Error("couldn't find dbname: " + dbname +
+ " total: " + this.config.databases.count());
}
return this.config.shards.find({_id: {$ne: x.primary}}).map(z => z._id);
@@ -334,8 +333,8 @@ var ShardingTest = function(params) {
}
}
- throw Error("can't find server connection for db '" + dbname + "'s primary shard: " +
- tojson(primaryShard));
+ throw Error("can't find server connection for db '" + dbname +
+ "'s primary shard: " + tojson(primaryShard));
};
this.normalize = function(x) {
@@ -859,9 +858,9 @@ var ShardingTest = function(params) {
}
if (arguments.length >= 3) {
- if (typeof(beforeRestartCallback) !== "function") {
+ if (typeof (beforeRestartCallback) !== "function") {
throw new Error("beforeRestartCallback must be a function but was of type " +
- typeof(beforeRestartCallback));
+ typeof (beforeRestartCallback));
}
beforeRestartCallback();
}
@@ -1621,7 +1620,6 @@ var ShardingTest = function(params) {
MongoRunner.getBinVersionFor(otherParams.configOptions.binVersion)))) {
this.configRS.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
}
-
};
// Stub for a hook to check that collection UUIDs are consistent across shards and the config
diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp
index 026cb9af45b..5181719de4f 100644
--- a/src/mongo/shell/shell_options.cpp
+++ b/src/mongo/shell/shell_options.cpp
@@ -61,7 +61,8 @@ using std::vector;
// SERVER-36807: Limit --setShellParameter to SetParameters we know we want to expose.
const std::set<std::string> kSetShellParameterWhitelist = {
- "disabledSecureAllocatorDomains", "newLineAfterPasswordPromptForTest",
+ "disabledSecureAllocatorDomains",
+ "newLineAfterPasswordPromptForTest",
};
std::string getMongoShellHelp(StringData name, const moe::OptionSection& options) {
@@ -321,14 +322,14 @@ Status storeMongoShellOptions(const moe::Environment& params,
auto* param = paramIt->second;
if (!param->allowedToChangeAtStartup()) {
return {ErrorCodes::BadValue,
- str::stream() << "Cannot use --setShellParameter to set '" << name
- << "' at startup"};
+ str::stream()
+ << "Cannot use --setShellParameter to set '" << name << "' at startup"};
}
auto status = param->setFromString(it.second);
if (!status.isOK()) {
return {ErrorCodes::BadValue,
- str::stream() << "Bad value for parameter '" << name << "': "
- << status.reason()};
+ str::stream()
+ << "Bad value for parameter '" << name << "': " << status.reason()};
}
}
}
diff --git a/src/mongo/shell/shell_options.h b/src/mongo/shell/shell_options.h
index aad972d0dc1..45ac4e55455 100644
--- a/src/mongo/shell/shell_options.h
+++ b/src/mongo/shell/shell_options.h
@@ -98,4 +98,4 @@ bool handlePreValidationMongoShellOptions(const moe::Environment& params,
Status storeMongoShellOptions(const moe::Environment& params, const std::vector<std::string>& args);
void redactPasswordOptions(int argc, char** argv);
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_options_init.cpp b/src/mongo/shell/shell_options_init.cpp
index 98cf02f5c53..2ea64570bc8 100644
--- a/src/mongo/shell/shell_options_init.cpp
+++ b/src/mongo/shell/shell_options_init.cpp
@@ -59,4 +59,4 @@ MONGO_STARTUP_OPTIONS_STORE(MongoShellOptions)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index b97e6358b14..09b5335117e 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -164,10 +164,9 @@ size_t skipOverString(const std::string& code, size_t start, char quote) {
// that the escaping backslash is not itself escaped. Comparisons of start and pos
// are to keep us from reading beyond the beginning of the quoted string.
//
- if (start == pos || code[pos - 1] != '\\' || // previous char was backslash
- start == pos - 1 ||
- code[pos - 2] == '\\' // char before backslash was not another
- ) {
+ if (start == pos || code[pos - 1] != '\\' || // previous char was backslash
+ start == pos - 1 || code[pos - 2] == '\\' // char before backslash was not another
+ ) {
break; // The quote we found was not preceded by an unescaped backslash; it is real
}
++pos; // The quote we found was escaped with backslash, so it doesn't count
diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h
index 0c856c37ed3..a5b8a0ce50c 100644
--- a/src/mongo/shell/shell_utils.h
+++ b/src/mongo/shell/shell_utils.h
@@ -96,5 +96,5 @@ extern stdx::mutex& mongoProgramOutputMutex;
// Helper to tell if a file exists cross platform
// TODO: Remove this when we have a cross platform file utility library
bool fileExists(const std::string& file);
-}
-}
+} // namespace shell_utils
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_extended.cpp b/src/mongo/shell/shell_utils_extended.cpp
index 6a756911f3d..df5444670dd 100644
--- a/src/mongo/shell/shell_utils_extended.cpp
+++ b/src/mongo/shell/shell_utils_extended.cpp
@@ -365,8 +365,8 @@ BSONObj getFileMode(const BSONObj& a, void* data) {
auto fileStatus = boost::filesystem::status(path, ec);
if (ec) {
uasserted(50974,
- str::stream() << "Unable to get status for file \"" << pathStr << "\": "
- << ec.message());
+ str::stream() << "Unable to get status for file \"" << pathStr
+ << "\": " << ec.message());
}
return BSON("" << fileStatus.permissions());
@@ -389,5 +389,5 @@ void installShellUtilsExtended(Scope& scope) {
scope.injectNative("umask", changeUmask);
scope.injectNative("getFileMode", getFileMode);
}
-}
-}
+} // namespace shell_utils
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_extended.h b/src/mongo/shell/shell_utils_extended.h
index 0fb83c4d7a0..543095d1187 100644
--- a/src/mongo/shell/shell_utils_extended.h
+++ b/src/mongo/shell/shell_utils_extended.h
@@ -37,4 +37,4 @@ class Scope;
namespace shell_utils {
void installShellUtilsExtended(Scope& scope);
}
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 5b7c68d5363..752c7724e42 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -275,11 +275,13 @@ ProgramRunner::ProgramRunner(const BSONObj& args, const BSONObj& env, bool isMon
_port = -1;
string prefix("mongod-");
- bool isMongodProgram = isMongo && (string("mongod") == programName ||
- programName.string().compare(0, prefix.size(), prefix) == 0);
+ bool isMongodProgram = isMongo &&
+ (string("mongod") == programName ||
+ programName.string().compare(0, prefix.size(), prefix) == 0);
prefix = "mongos-";
- bool isMongosProgram = isMongo && (string("mongos") == programName ||
- programName.string().compare(0, prefix.size(), prefix) == 0);
+ bool isMongosProgram = isMongo &&
+ (string("mongos") == programName ||
+ programName.string().compare(0, prefix.size(), prefix) == 0);
if (!isMongo) {
_name = "sh";
diff --git a/src/mongo/shell/types.js b/src/mongo/shell/types.js
index 19c1fb272f3..faaa5d00499 100644
--- a/src/mongo/shell/types.js
+++ b/src/mongo/shell/types.js
@@ -1,5 +1,5 @@
// Date and time types
-if (typeof(Timestamp) != "undefined") {
+if (typeof (Timestamp) != "undefined") {
Timestamp.prototype.tojson = function() {
return this.toString();
};
@@ -265,13 +265,13 @@ Array.stdDev = function(arr) {
Object.extend = function(dst, src, deep) {
for (var k in src) {
var v = src[k];
- if (deep && typeof(v) == "object" && v !== null) {
+ if (deep && typeof (v) == "object" && v !== null) {
if (v.constructor === ObjectId) { // convert ObjectId properly
eval("v = " + tojson(v));
} else if ("floatApprox" in v) { // convert NumberLong properly
eval("v = " + tojson(v));
} else {
- v = Object.extend(typeof(v.length) == "number" ? [] : {}, v, true);
+ v = Object.extend(typeof (v.length) == "number" ? [] : {}, v, true);
}
}
dst[k] = v;
@@ -433,7 +433,7 @@ ObjectId.fromDate = function(source) {
if (source instanceof Date) {
sourceDate = source;
} else {
- throw Error("Cannot create ObjectId from " + typeof(source) + ": " + tojson(source));
+ throw Error("Cannot create ObjectId from " + typeof (source) + ": " + tojson(source));
}
// Convert date object to seconds since Unix epoch.
@@ -449,7 +449,7 @@ ObjectId.fromDate = function(source) {
};
// DBPointer
-if (typeof(DBPointer) != "undefined") {
+if (typeof (DBPointer) != "undefined") {
DBPointer.prototype.fetch = function() {
assert(this.ns, "need a ns");
assert(this.id, "need an id");
@@ -476,7 +476,7 @@ if (typeof(DBPointer) != "undefined") {
}
// DBRef
-if (typeof(DBRef) != "undefined") {
+if (typeof (DBRef) != "undefined") {
DBRef.prototype.fetch = function() {
assert(this.$ref, "need a ns");
assert(this.$id, "need an id");
@@ -513,7 +513,7 @@ if (typeof(DBRef) != "undefined") {
}
// BinData
-if (typeof(BinData) != "undefined") {
+if (typeof (BinData) != "undefined") {
BinData.prototype.tojson = function() {
return this.toString();
};
@@ -529,7 +529,7 @@ if (typeof(BinData) != "undefined") {
}
// Map
-if (typeof(Map) == "undefined") {
+if (typeof (Map) == "undefined") {
Map = function() {
this._data = {};
};
@@ -539,7 +539,7 @@ Map.hash = function(val) {
if (!val)
return val;
- switch (typeof(val)) {
+ switch (typeof (val)) {
case 'string':
case 'number':
case 'date':
@@ -553,7 +553,7 @@ Map.hash = function(val) {
return s;
}
- throw Error("can't hash : " + typeof(val));
+ throw Error("can't hash : " + typeof (val));
};
Map.prototype.put = function(key, value) {
@@ -594,7 +594,7 @@ Map.prototype.values = function() {
return all;
};
-if (typeof(gc) == "undefined") {
+if (typeof (gc) == "undefined") {
gc = function() {
print("warning: using noop gc()");
};
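
A sketch of the fallback Map in use, assuming put() stores values under Map.hash(key), which is what the hashing above supports; structurally equal object keys hash to the same string:

var m = new Map();
m.put({a: 1}, "first");   // object key: hashed by concatenating k + val[k]
m.put("plain", 2);        // string key: used as-is
printjson(m.values());    // ["first", 2]
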
@@ -640,7 +640,6 @@ tojson = function(x, indent, nolint, depth) {
default:
throw Error("tojson can't handle type " + (typeof x));
}
-
};
tojson.MAX_DEPTH = 100;
@@ -655,11 +654,11 @@ tojsonObject = function(x, indent, nolint, depth) {
if (!indent)
indent = "";
- if (typeof(x.tojson) == "function" && x.tojson != tojson) {
+ if (typeof (x.tojson) == "function" && x.tojson != tojson) {
return x.tojson(indent, nolint, depth);
}
- if (x.constructor && typeof(x.constructor.tojson) == "function" &&
+ if (x.constructor && typeof (x.constructor.tojson) == "function" &&
x.constructor.tojson != tojson) {
return x.constructor.tojson(x, indent, nolint, depth);
}
@@ -685,7 +684,7 @@ tojsonObject = function(x, indent, nolint, depth) {
indent += tabSpace;
var keys = x;
- if (typeof(x._simpleKeys) == "function")
+ if (typeof (x._simpleKeys) == "function")
keys = x._simpleKeys();
var fieldStrings = [];
for (var k in keys) {
@@ -721,14 +720,14 @@ printjsononeline = function(x) {
};
isString = function(x) {
- return typeof(x) == "string";
+ return typeof (x) == "string";
};
isNumber = function(x) {
- return typeof(x) == "number";
+ return typeof (x) == "number";
};
// This function returns true even if the argument is an array. See SERVER-14220.
isObject = function(x) {
- return typeof(x) == "object";
+ return typeof (x) == "object";
};
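
As the tojsonObject hunk above shows, serialization first defers to a per-instance tojson hook, then to a constructor-level one; a sketch of the per-instance hook:

var obj = {x: 1};
obj.tojson = function(indent, nolint, depth) {
    return "<custom x=" + this.x + ">";
};
print(tojson(obj));  // <custom x=1>
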
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index 11e6cde6902..1201e800eeb 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -213,7 +213,7 @@ print.captureAllOutput = function(fn, args) {
};
var indentStr = function(indent, s) {
- if (typeof(s) === "undefined") {
+ if (typeof (s) === "undefined") {
s = indent;
indent = 0;
}
@@ -350,7 +350,7 @@ jsTestLog = function(msg) {
if (typeof msg === "object") {
msg = tojson(msg);
}
- assert.eq(typeof(msg), "string", "Received: " + msg);
+ assert.eq(typeof (msg), "string", "Received: " + msg);
const msgs = ["----", ...msg.split("\n"), "----"].map(s => `[jsTest] ${s}`);
print(`\n\n${msgs.join("\n")}\n\n`);
};
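
For reference, the banner formatting above produces output like this sketch:

jsTestLog("starting step 1");
// prints (surrounded by blank lines):
// [jsTest] ----
// [jsTest] starting step 1
// [jsTest] ----
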
@@ -596,10 +596,10 @@ if (typeof _shouldUseImplicitSessions === 'undefined') {
}
shellPrintHelper = function(x) {
- if (typeof(x) == "undefined") {
+ if (typeof (x) == "undefined") {
// Make sure that we have a db var before we use it
// TODO: This implicit calling of GLE can cause subtle, hard to track issues - remove?
- if (__callLastError && typeof(db) != "undefined" && db.getMongo &&
+ if (__callLastError && typeof (db) != "undefined" && db.getMongo &&
db.getMongo().writeMode() == "legacy") {
__callLastError = false;
// explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad
@@ -638,7 +638,6 @@ shellPrintHelper = function(x) {
shellAutocomplete = function(
/*prefix*/) { // outer scope function called on init. Actual function at end
-
var universalMethods =
"constructor prototype toString valueOf toLocaleString hasOwnProperty propertyIsEnumerable"
.split(' ');
@@ -743,7 +742,7 @@ shellAutocomplete = function(
{}; // see http://dreaminginjavascript.wordpress.com/2008/08/22/eliminating-duplicates/
for (var i = 0; i < possibilities.length; i++) {
var p = possibilities[i];
- if (typeof(curObj[p]) == "undefined" && curObj != global)
+ if (typeof (curObj[p]) == "undefined" && curObj != global)
continue; // extraGlobals aren't in the global object
if (p.length == 0 || p.length < lastPrefix.length)
continue;
@@ -829,7 +828,7 @@ shellHelper.set = function(str) {
};
shellHelper.it = function() {
- if (typeof(___it___) == "undefined" || ___it___ == null) {
+ if (typeof (___it___) == "undefined" || ___it___ == null) {
print("no cursor");
return;
}
@@ -862,7 +861,7 @@ shellHelper.show = function(what) {
continue;
var val = x[z];
- var mytype = typeof(val);
+ var mytype = typeof (val);
if (mytype == "string" || mytype == "number")
l += z + ":" + val + " ";
@@ -1120,7 +1119,8 @@ shellHelper.show = function(what) {
}
if (matchesKnownImposterSignature) {
- print("\n" + "Warning: Non-Genuine MongoDB Detected\n\n" +
+ print("\n" +
+ "Warning: Non-Genuine MongoDB Detected\n\n" +
"This server or service appears to be an emulation of MongoDB " +
"rather than an official MongoDB product.\n\n" +
@@ -1137,7 +1137,6 @@ shellHelper.show = function(what) {
}
throw Error("don't know how to show [" + what + "]");
-
};
__promptWrapper__ = function(promptFunction) {
@@ -1173,8 +1172,8 @@ Math.sigFig = function(x, N) {
var Random = (function() {
var initialized = false;
- var errorMsg =
- "The random number generator hasn't been seeded yet; " + "call Random.setRandomSeed()";
+ var errorMsg = "The random number generator hasn't been seeded yet; " +
+ "call Random.setRandomSeed()";
// Set the random generator seed.
function srand(s) {
@@ -1248,7 +1247,6 @@ var Random = (function() {
setRandomSeed: setRandomSeed,
srand: srand,
};
-
})();
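
A usage sketch of the seeded-first contract that errorMsg above enforces (Random.rand() is the usual accessor guarded by this check; the seed value is illustrative):

Random.setRandomSeed(42);  // seed first; the generators assert initialized
var r = Random.rand();     // uniform value in [0, 1)
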
/**
@@ -1351,7 +1349,8 @@ _awaitRSHostViaRSMonitor = function(hostAddr, desiredState, rsName, timeout) {
desiredState = {ok: true};
}
- print("Awaiting " + hostAddr + " to be " + tojson(desiredState) + " in " + " rs " + rsName);
+ print("Awaiting " + hostAddr + " to be " + tojson(desiredState) + " in " +
+ " rs " + rsName);
var tests = 0;
assert.soon(
@@ -1387,8 +1386,8 @@ _awaitRSHostViaRSMonitor = function(hostAddr, desiredState, rsName, timeout) {
}
return false;
},
- "timed out waiting for replica set member: " + hostAddr + " to reach state: " +
- tojson(desiredState),
+ "timed out waiting for replica set member: " + hostAddr +
+ " to reach state: " + tojson(desiredState),
timeout);
};
@@ -1700,35 +1699,52 @@ help = shellHelper.help = function(x) {
print("\t returns a connection to the new server");
return;
} else if (x == "") {
- print("\t" + "db.help() help on db methods");
- print("\t" + "db.mycoll.help() help on collection methods");
- print("\t" + "sh.help() sharding helpers");
- print("\t" + "rs.help() replica set helpers");
- print("\t" + "help admin administrative help");
- print("\t" + "help connect connecting to a db help");
- print("\t" + "help keys key shortcuts");
- print("\t" + "help misc misc things to know");
- print("\t" + "help mr mapreduce");
+ print("\t" +
+ "db.help() help on db methods");
+ print("\t" +
+ "db.mycoll.help() help on collection methods");
+ print("\t" +
+ "sh.help() sharding helpers");
+ print("\t" +
+ "rs.help() replica set helpers");
+ print("\t" +
+ "help admin administrative help");
+ print("\t" +
+ "help connect connecting to a db help");
+ print("\t" +
+ "help keys key shortcuts");
+ print("\t" +
+ "help misc misc things to know");
+ print("\t" +
+ "help mr mapreduce");
print();
- print("\t" + "show dbs show database names");
- print("\t" + "show collections show collections in current database");
- print("\t" + "show users show users in current database");
+ print("\t" +
+ "show dbs show database names");
+ print("\t" +
+ "show collections show collections in current database");
+ print("\t" +
+ "show users show users in current database");
print(
"\t" +
"show profile show most recent system.profile entries with time >= 1ms");
- print("\t" + "show logs show the accessible logger names");
+ print("\t" +
+ "show logs show the accessible logger names");
print(
"\t" +
"show log [name] prints out the last segment of log in memory, 'global' is default");
- print("\t" + "use <db_name> set current database");
- print("\t" + "db.foo.find() list objects in collection foo");
- print("\t" + "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
+ print("\t" +
+ "use <db_name> set current database");
+ print("\t" +
+ "db.foo.find() list objects in collection foo");
+ print("\t" +
+ "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
print(
"\t" +
"it result of the last line evaluated; use to further iterate");
print("\t" +
"DBQuery.shellBatchSize = x set default number of items to display on shell");
- print("\t" + "exit quit the mongo shell");
+ print("\t" +
+ "exit quit the mongo shell");
} else
print("unknown help option");
};
diff --git a/src/mongo/shell/utils_auth.js b/src/mongo/shell/utils_auth.js
index 0343a81ceef..9beb08db940 100644
--- a/src/mongo/shell/utils_auth.js
+++ b/src/mongo/shell/utils_auth.js
@@ -1,146 +1,146 @@
var authutil;
(function() {
- assert(!authutil);
- authutil = {};
+assert(!authutil);
+authutil = {};
- /**
- * Logs out all connections "conn" from database "dbname".
- */
- authutil.logout = function(conn, dbname) {
- var i;
- if (null == conn.length) {
- conn = [conn];
- }
- for (i = 0; i < conn.length; ++i) {
- var curDB = new DB(conn[i], dbname);
- curDB.logout();
- }
- };
-
- /**
- * Authenticates all connections in "conns" using "authParams" on database "dbName".
- *
- * Raises an exception if any authentication fails, and tries to leave all connnections
- * in "conns" in the logged-out-of-dbName state.
- */
- authutil.assertAuthenticate = function(conns, dbName, authParams) {
- var conn, i, ex, ex2;
- if (conns.length == null)
- conns = [conns];
-
- try {
- for (i = 0; i < conns.length; ++i) {
- conn = conns[i];
- // Bypass the implicit auth call in getDB();
- var db = new DB(conn, dbName);
- try {
- retryOnNetworkError(db._authOrThrow.bind(db, authParams));
- } catch (ex3) {
- doassert("assert failed : " + "Failed to authenticate " + conn + " to " +
- dbName + " using parameters " + tojson(authParams) + " : " + ex3);
- }
- }
- } catch (ex) {
- try {
- authutil.logout(conns, dbName);
- } catch (ex2) {
- }
- throw ex;
- }
- };
+/**
+ * Logs out all connections "conn" from database "dbname".
+ */
+authutil.logout = function(conn, dbname) {
+ var i;
+ if (null == conn.length) {
+ conn = [conn];
+ }
+ for (i = 0; i < conn.length; ++i) {
+ var curDB = new DB(conn[i], dbname);
+ curDB.logout();
+ }
+};
- /**
- * Authenticates all connections in "conns" using "authParams" on database "dbName".
- * Raises in exception if any of the authentications succeed.
- */
- authutil.assertAuthenticateFails = function(conns, dbName, authParams) {
- var conn, i;
- if (conns.length == null)
- conns = [conns];
+/**
+ * Authenticates all connections in "conns" using "authParams" on database "dbName".
+ *
+ * Raises an exception if any authentication fails, and tries to leave all connections
+ * in "conns" in the logged-out-of-dbName state.
+ */
+authutil.assertAuthenticate = function(conns, dbName, authParams) {
+ var conn, i, ex, ex2;
+ if (conns.length == null)
+ conns = [conns];
+ try {
for (i = 0; i < conns.length; ++i) {
conn = conns[i];
// Bypass the implicit auth call in getDB();
var db = new DB(conn, dbName);
- const ex = assert.throws(retryOnNetworkError,
- [db._authOrThrow.bind(db, authParams)],
- "Unexpectedly authenticated " + conn + " to " + dbName +
- " using parameters " + tojson(authParams));
- if (isNetworkError(ex)) {
- throw ex;
+ try {
+ retryOnNetworkError(db._authOrThrow.bind(db, authParams));
+ } catch (ex3) {
+ doassert("assert failed : " +
+ "Failed to authenticate " + conn + " to " + dbName + " using parameters " +
+ tojson(authParams) + " : " + ex3);
}
}
- };
+ } catch (ex) {
+ try {
+ authutil.logout(conns, dbName);
+ } catch (ex2) {
+ }
+ throw ex;
+ }
+};
- /**
- * Executes action() after authenticating the keyfile user on "conn", then logs out the keyfile
- * user.
- */
- authutil.asCluster = function(conn, keyfile, action) {
- var ex;
- const authMode = jsTest.options().clusterAuthMode;
+/**
+ * Authenticates all connections in "conns" using "authParams" on database "dbName".
+ * Raises an exception if any of the authentications succeed.
+ */
+authutil.assertAuthenticateFails = function(conns, dbName, authParams) {
+ var conn, i;
+ if (conns.length == null)
+ conns = [conns];
- // put a connection in an array for uniform processing.
- let connArray = conn;
- if (conn.length == null)
- connArray = [conn];
+ for (i = 0; i < conns.length; ++i) {
+ conn = conns[i];
+ // Bypass the implicit auth call in getDB();
+ var db = new DB(conn, dbName);
+ const ex = assert.throws(retryOnNetworkError,
+ [db._authOrThrow.bind(db, authParams)],
+ "Unexpectedly authenticated " + conn + " to " + dbName +
+ " using parameters " + tojson(authParams));
+ if (isNetworkError(ex)) {
+ throw ex;
+ }
+ }
+};
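
A usage sketch of assertAuthenticateFails (credentials illustrative): a genuine auth failure satisfies the assertion, while a network error is re-thrown rather than swallowed.

authutil.assertAuthenticateFails(conn, "admin", {
    user: "admin",
    pwd: "wrong-password",  // illustrative bad credential
    mechanism: "SCRAM-SHA-1",
});
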
- let clusterTimes = connArray.map(connElem => {
- const connClusterTime = connElem.getClusterTime();
- const sessionClusterTime = connElem._getDefaultSession().getClusterTime();
- const operationTime = connElem._getDefaultSession().getOperationTime();
+/**
+ * Executes action() after authenticating the keyfile user on "conn", then logs out the keyfile
+ * user.
+ */
+authutil.asCluster = function(conn, keyfile, action) {
+ var ex;
+ const authMode = jsTest.options().clusterAuthMode;
- connElem.resetClusterTime_forTesting();
- connElem._getDefaultSession().resetClusterTime_forTesting();
- connElem._getDefaultSession().resetOperationTime_forTesting();
+ // put a connection in an array for uniform processing.
+ let connArray = conn;
+ if (conn.length == null)
+ connArray = [conn];
- return {connClusterTime, sessionClusterTime, operationTime};
- });
+ let clusterTimes = connArray.map(connElem => {
+ const connClusterTime = connElem.getClusterTime();
+ const sessionClusterTime = connElem._getDefaultSession().getClusterTime();
+ const operationTime = connElem._getDefaultSession().getOperationTime();
- if (authMode === 'keyFile') {
- authutil.assertAuthenticate(conn, 'admin', {
- user: '__system',
- mechanism: 'SCRAM-SHA-1',
- pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
- });
- } else if (authMode === 'x509') {
- authutil.assertAuthenticate(conn, '$external', {
- mechanism: 'MONGODB-X509',
- });
- } else {
- throw new Error('clusterAuthMode ' + authMode + ' is currently unsupported');
- }
+ connElem.resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetOperationTime_forTesting();
+
+ return {connClusterTime, sessionClusterTime, operationTime};
+ });
+
+ if (authMode === 'keyFile') {
+ authutil.assertAuthenticate(conn, 'admin', {
+ user: '__system',
+ mechanism: 'SCRAM-SHA-1',
+ pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
+ });
+ } else if (authMode === 'x509') {
+ authutil.assertAuthenticate(conn, '$external', {
+ mechanism: 'MONGODB-X509',
+ });
+ } else {
+ throw new Error('clusterAuthMode ' + authMode + ' is currently unsupported');
+ }
+ try {
+ return action();
+ } finally {
try {
- return action();
- } finally {
- try {
- authutil.logout(conn, 'admin');
- let connArray = conn;
- if (conn.length == null)
- connArray = [conn];
+ authutil.logout(conn, 'admin');
+ let connArray = conn;
+ if (conn.length == null)
+ connArray = [conn];
- for (let i = 0; i < connArray.length; i++) {
- let connElem = connArray[i];
- connElem.resetClusterTime_forTesting();
- connElem._getDefaultSession().resetClusterTime_forTesting();
- connElem._getDefaultSession().resetOperationTime_forTesting();
- if (clusterTimes[i].connClusterTime) {
- connElem.advanceClusterTime(clusterTimes[i].connClusterTime);
- }
- if (clusterTimes[i].sessionClusterTime) {
- connElem._getDefaultSession().advanceClusterTime(
- clusterTimes[i].sessionClusterTime);
- }
- if (clusterTimes[i].operationTime) {
- connElem._getDefaultSession().advanceOperationTime(
- clusterTimes[i].operationTime);
- }
+ for (let i = 0; i < connArray.length; i++) {
+ let connElem = connArray[i];
+ connElem.resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetOperationTime_forTesting();
+ if (clusterTimes[i].connClusterTime) {
+ connElem.advanceClusterTime(clusterTimes[i].connClusterTime);
+ }
+ if (clusterTimes[i].sessionClusterTime) {
+ connElem._getDefaultSession().advanceClusterTime(
+ clusterTimes[i].sessionClusterTime);
+ }
+ if (clusterTimes[i].operationTime) {
+ connElem._getDefaultSession().advanceOperationTime(
+ clusterTimes[i].operationTime);
}
- } catch (ex) {
}
+ } catch (ex) {
}
- };
-
+ }
+};
}());
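
A usage sketch of asCluster (the keyfile path is illustrative): it authenticates as __system using the keyfile contents, runs the action, then logs out and restores the saved cluster and operation times.

authutil.asCluster(conn, "jstests/libs/authTestsKey", function() {
    // Runs while authenticated as the __system cluster user.
    return conn.getDB("admin").runCommand({replSetGetStatus: 1});
});
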
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index 3c62675db7b..2ada654ce55 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -109,13 +109,13 @@ sh.enableSharding = function(dbname) {
sh.shardCollection = function(fullName, key, unique, options) {
sh._checkFullName(fullName);
assert(key, "need a key");
- assert(typeof(key) == "object", "key needs to be an object");
+ assert(typeof (key) == "object", "key needs to be an object");
var cmd = {shardCollection: fullName, key: key};
if (unique)
cmd.unique = true;
if (options) {
- if (typeof(options) !== "object") {
+ if (typeof (options) !== "object") {
throw new Error("options must be an object");
}
Object.extend(cmd, options);
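
A usage sketch of the command being assembled above (the extra option shown is illustrative):

// Builds and runs {shardCollection: "test.coll", key: {x: 1}, unique: true,
// numInitialChunks: 4} against the cluster.
sh.shardCollection("test.coll", {x: 1}, true, {numInitialChunks: 4});
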
@@ -140,7 +140,7 @@ sh.moveChunk = function(fullName, find, to) {
};
sh.setBalancerState = function(isOn) {
- assert(typeof(isOn) == "boolean", "Must pass boolean to setBalancerState");
+ assert(typeof (isOn) == "boolean", "Must pass boolean to setBalancerState");
if (isOn) {
return sh.startBalancer();
} else {
@@ -243,7 +243,7 @@ sh.waitForPingChange = function(activePings, timeout, interval) {
};
sh.waitForBalancer = function(wait, timeout, interval) {
- if (typeof(wait) === 'undefined') {
+ if (typeof (wait) === 'undefined') {
wait = false;
}
var initialStatus = sh._getBalancerStatus();
@@ -296,7 +296,6 @@ sh.enableBalancing = function(coll) {
* mongos )
*/
sh._lastMigration = function(ns) {
-
var coll = null;
var dbase = null;
var config = null;
@@ -480,12 +479,12 @@ sh.getRecentMigrations = function(configDB) {
var result = configDB.changelog
.aggregate([
{
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from",
- 'details.errmsg': {$exists: false},
- 'details.note': 'success'
- }
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ 'details.errmsg': {$exists: false},
+ 'details.note': 'success'
+ }
},
{$group: {_id: {msg: "$details.errmsg"}, count: {$sum: 1}}},
{$project: {_id: {$ifNull: ["$_id.msg", "Success"]}, count: "$count"}}
@@ -497,28 +496,28 @@ sh.getRecentMigrations = function(configDB) {
configDB.changelog
.aggregate([
{
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from",
- $or: [
- {'details.errmsg': {$exists: true}},
- {'details.note': {$ne: 'success'}}
- ]
- }
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ $or: [
+ {'details.errmsg': {$exists: true}},
+ {'details.note': {$ne: 'success'}}
+ ]
+ }
},
{
- $group: {
- _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
- count: {$sum: 1}
- }
+ $group: {
+ _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
+ count: {$sum: 1}
+ }
},
{
- $project: {
- _id: {$ifNull: ['$_id.msg', 'aborted']},
- from: "$_id.from",
- to: "$_id.to",
- count: "$count"
- }
+ $project: {
+ _id: {$ifNull: ['$_id.msg', 'aborted']},
+ from: "$_id.from",
+ to: "$_id.to",
+ count: "$count"
+ }
}
])
.toArray());
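
The two pipelines above bucket the past day's moveChunk.from changelog entries into a success count and per-error failure counts; a sketch of the combined result shape (values illustrative):

printjson(sh.getRecentMigrations(db.getSiblingDB("config")));
// [{_id: "Success", count: 12},
//  {_id: "aborted", from: "shard0000", to: "shard0001", count: 2}]
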
@@ -703,7 +702,7 @@ function printShardingStatus(configDB, verbose) {
var nonBooleanNote = function(name, value) {
// If the given value is not a boolean, return a string of the
// form " (<name>: <value>)", where <value> is converted to JSON.
- var t = typeof(value);
+ var t = typeof (value);
var s = "";
if (t != "boolean" && t != "undefined") {
s = " (" + name + ": " + tojson(value) + ")";
@@ -814,9 +813,8 @@ function printShardingSizes(configDB) {
delete out.ok;
output(4,
- tojson(chunk.min) + " -->> " + tojson(chunk.max) + " on : " +
- chunk.shard + " " + tojson(out));
-
+ tojson(chunk.min) + " -->> " + tojson(chunk.max) +
+ " on : " + chunk.shard + " " + tojson(out));
});
});
}
diff --git a/src/mongo/stdx/mutex.h b/src/mongo/stdx/mutex.h
index b75a5b56988..033a5f9b080 100644
--- a/src/mongo/stdx/mutex.h
+++ b/src/mongo/stdx/mutex.h
@@ -35,8 +35,8 @@ namespace mongo {
namespace stdx {
using ::std::mutex; // NOLINT
-using ::std::timed_mutex; // NOLINT
using ::std::recursive_mutex; // NOLINT
+using ::std::timed_mutex; // NOLINT
using ::std::adopt_lock_t; // NOLINT
using ::std::defer_lock_t; // NOLINT
diff --git a/src/mongo/stdx/thread.h b/src/mongo/stdx/thread.h
index 278b7678e72..2968e9dcae2 100644
--- a/src/mongo/stdx/thread.h
+++ b/src/mongo/stdx/thread.h
@@ -57,8 +57,8 @@ namespace stdx {
*/
class thread : private ::std::thread { // NOLINT
public:
- using ::std::thread::native_handle_type; // NOLINT
using ::std::thread::id; // NOLINT
+ using ::std::thread::native_handle_type; // NOLINT
thread() noexcept : ::std::thread::thread() {} // NOLINT
@@ -91,13 +91,13 @@ public:
::std::thread::operator=(static_cast<::std::thread&&>(std::move(other)))); // NOLINT
};
- using ::std::thread::joinable; // NOLINT
using ::std::thread::get_id; // NOLINT
- using ::std::thread::native_handle; // NOLINT
using ::std::thread::hardware_concurrency; // NOLINT
+ using ::std::thread::joinable; // NOLINT
+ using ::std::thread::native_handle; // NOLINT
- using ::std::thread::join; // NOLINT
using ::std::thread::detach; // NOLINT
+ using ::std::thread::join; // NOLINT
void swap(thread& other) noexcept {
::std::thread::swap(static_cast<::std::thread&>(other)); // NOLINT
diff --git a/src/mongo/stdx/variant.h b/src/mongo/stdx/variant.h
index c2d396a7c27..c6c903d6402 100644
--- a/src/mongo/stdx/variant.h
+++ b/src/mongo/stdx/variant.h
@@ -33,16 +33,16 @@
namespace mongo::stdx {
-using ::mpark::variant;
-using ::mpark::visit;
-using ::mpark::holds_alternative;
using ::mpark::get;
using ::mpark::get_if;
+using ::mpark::holds_alternative;
+using ::mpark::variant;
+using ::mpark::visit;
-using ::mpark::variant_size;
-using ::mpark::variant_size_v;
using ::mpark::variant_alternative;
using ::mpark::variant_alternative_t;
+using ::mpark::variant_size;
+using ::mpark::variant_size_v;
constexpr auto variant_npos = ::mpark::variant_npos;
@@ -53,7 +53,7 @@ using ::mpark::operator>;
using ::mpark::operator<=;
using ::mpark::operator>=;
-using ::mpark::monostate;
using ::mpark::bad_variant_access;
+using ::mpark::monostate;
} // namespace mongo::stdx
diff --git a/src/mongo/tools/mongobridge_options.h b/src/mongo/tools/mongobridge_options.h
index 590c84e1abd..4fac287f976 100644
--- a/src/mongo/tools/mongobridge_options.h
+++ b/src/mongo/tools/mongobridge_options.h
@@ -68,4 +68,4 @@ bool handlePreValidationMongoBridgeOptions(const moe::Environment& params);
Status storeMongoBridgeOptions(const moe::Environment& params,
const std::vector<std::string>& args);
-}
+} // namespace mongo
diff --git a/src/mongo/tools/mongobridge_options_init.cpp b/src/mongo/tools/mongobridge_options_init.cpp
index a80336cc5da..d1373ccd7a2 100644
--- a/src/mongo/tools/mongobridge_options_init.cpp
+++ b/src/mongo/tools/mongobridge_options_init.cpp
@@ -69,4 +69,4 @@ MONGO_STARTUP_OPTIONS_STORE(MongoBridgeOptions)(InitializerContext* context) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/tools/mongoebench_options.cpp b/src/mongo/tools/mongoebench_options.cpp
index b4f25b89cf5..0ba4394d976 100644
--- a/src/mongo/tools/mongoebench_options.cpp
+++ b/src/mongo/tools/mongoebench_options.cpp
@@ -125,8 +125,7 @@ Status storeMongoeBenchOptions(const moe::Environment& params,
if (!parentPath.empty() && !boost::filesystem::exists(parentPath)) {
return {ErrorCodes::NonExistentPath,
str::stream() << "Directory containing output file must already exist, but "
- << parentPath.string()
- << " wasn't found"};
+ << parentPath.string() << " wasn't found"};
}
return Status::OK();
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index a24b8f93649..3536bc16ab4 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -83,8 +83,8 @@ class TransportLayerASIO::BatonASIO : public NetworkingBaton {
EventFDHolder() : fd(::eventfd(0, EFD_CLOEXEC)) {
if (fd < 0) {
auto e = errno;
- std::string reason = str::stream() << "error in creating eventfd: "
- << errnoWithDescription(e);
+ std::string reason = str::stream()
+ << "error in creating eventfd: " << errnoWithDescription(e);
auto code = (e == EMFILE || e == ENFILE) ? ErrorCodes::TooManyFilesOpen
: ErrorCodes::UnknownError;
@@ -165,7 +165,7 @@ public:
}
_safeExecute(std::move(lk),
- [ id, expiration, promise = std::move(pf.promise), this ]() mutable {
+ [id, expiration, promise = std::move(pf.promise), this]() mutable {
auto iter = _timers.emplace(std::piecewise_construct,
std::forward_as_tuple(expiration),
std::forward_as_tuple(id, std::move(promise)));
@@ -381,7 +381,7 @@ private:
}
_safeExecute(std::move(lk),
- [ id, fd, type, promise = std::move(pf.promise), this ]() mutable {
+ [id, fd, type, promise = std::move(pf.promise), this]() mutable {
_sessions[id] = TransportSession{fd, type, std::move(promise)};
});
@@ -440,7 +440,7 @@ private:
template <typename Callback>
void _safeExecute(stdx::unique_lock<stdx::mutex> lk, Callback&& cb) {
if (_inPoll) {
- _scheduled.push_back([ cb = std::forward<Callback>(cb), this ](Status) mutable {
+ _scheduled.push_back([cb = std::forward<Callback>(cb), this](Status) mutable {
stdx::lock_guard<stdx::mutex> lk(_mutex);
cb();
});
diff --git a/src/mongo/transport/max_conns_override_test.cpp b/src/mongo/transport/max_conns_override_test.cpp
index c1b38421efd..40c67fdb408 100644
--- a/src/mongo/transport/max_conns_override_test.cpp
+++ b/src/mongo/transport/max_conns_override_test.cpp
@@ -90,4 +90,4 @@ TEST(MaxConnsOverride, UNIXPaths) {
#endif
} // namespace
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/transport/message_compressor_manager_test.cpp b/src/mongo/transport/message_compressor_manager_test.cpp
index 7e0868ff2c2..24f877a3719 100644
--- a/src/mongo/transport/message_compressor_manager_test.cpp
+++ b/src/mongo/transport/message_compressor_manager_test.cpp
@@ -197,8 +197,9 @@ TEST(MessageCompressorManager, BadCompressionRequested) {
}
TEST(MessageCompressorManager, BadAndGoodCompressionRequested) {
- auto input = BSON("isMaster" << 1 << "compression" << BSON_ARRAY("fakecompressor"
- << "noop"));
+ auto input = BSON("isMaster" << 1 << "compression"
+ << BSON_ARRAY("fakecompressor"
+ << "noop"));
checkServerNegotiation(input, {"noop"});
}
diff --git a/src/mongo/transport/message_compressor_registry.h b/src/mongo/transport/message_compressor_registry.h
index 160475675ae..4e20c2dad1c 100644
--- a/src/mongo/transport/message_compressor_registry.h
+++ b/src/mongo/transport/message_compressor_registry.h
@@ -45,7 +45,7 @@ namespace mongo {
namespace optionenvironment {
class OptionSection;
class Environment;
-} // namespace option environment
+} // namespace optionenvironment
namespace moe = mongo::optionenvironment;
diff --git a/src/mongo/transport/service_entry_point.h b/src/mongo/transport/service_entry_point.h
index 3faa834ea97..2c3ded7849a 100644
--- a/src/mongo/transport/service_entry_point.h
+++ b/src/mongo/transport/service_entry_point.h
@@ -65,8 +65,8 @@ public:
virtual Status start() = 0;
/**
- * Shuts down the service entry point.
- */
+ * Shuts down the service entry point.
+ */
virtual bool shutdown(Milliseconds timeout) = 0;
/**
@@ -75,8 +75,8 @@ public:
virtual void appendStats(BSONObjBuilder* bob) const = 0;
/**
- * Returns the number of sessions currently open.
- */
+ * Returns the number of sessions currently open.
+ */
virtual size_t numOpenSessions() const = 0;
/**
diff --git a/src/mongo/transport/service_entry_point_impl.cpp b/src/mongo/transport/service_entry_point_impl.cpp
index 64d4cef9c80..5076b21b9d7 100644
--- a/src/mongo/transport/service_entry_point_impl.cpp
+++ b/src/mongo/transport/service_entry_point_impl.cpp
@@ -167,7 +167,7 @@ void ServiceEntryPointImpl::startSession(transport::SessionHandle session) {
<< connectionCount << word << " now open)";
}
- ssm->setCleanupHook([ this, ssmIt, quiet, session = std::move(session) ] {
+ ssm->setCleanupHook([this, ssmIt, quiet, session = std::move(session)] {
size_t connectionCount;
auto remote = session->remote();
{
@@ -223,8 +223,8 @@ bool ServiceEntryPointImpl::shutdown(Milliseconds timeout) {
auto noWorkersLeft = [this] { return numOpenSessions() == 0; };
while (timeSpent < timeout &&
!_shutdownCondition.wait_for(lk, checkInterval.toSystemDuration(), noWorkersLeft)) {
- log(LogComponent::kNetwork) << "shutdown: still waiting on " << numOpenSessions()
- << " active workers to drain... ";
+ log(LogComponent::kNetwork)
+ << "shutdown: still waiting on " << numOpenSessions() << " active workers to drain... ";
timeSpent += checkInterval;
}
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index e6f79d50b09..3f35fe07c78 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -183,34 +183,35 @@ Status ServiceExecutorAdaptive::schedule(ServiceExecutorAdaptive::Task task,
}
auto wrappedTask =
- [ this, task = std::move(task), scheduleTime, pendingCounterPtr, taskName, flags ](
+ [this, task = std::move(task), scheduleTime, pendingCounterPtr, taskName, flags](
auto status) {
- pendingCounterPtr->subtractAndFetch(1);
- auto start = _tickSource->getTicks();
- _totalSpentQueued.addAndFetch(start - scheduleTime);
+ pendingCounterPtr->subtractAndFetch(1);
+ auto start = _tickSource->getTicks();
+ _totalSpentQueued.addAndFetch(start - scheduleTime);
- _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalSpentQueued.addAndFetch(start - scheduleTime);
+ _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
+ ._totalSpentQueued.addAndFetch(start - scheduleTime);
- if (_localThreadState->recursionDepth++ == 0) {
- _localThreadState->executing.markRunning();
- _threadsInUse.addAndFetch(1);
- }
- const auto guard = makeGuard([this, taskName] {
- if (--_localThreadState->recursionDepth == 0) {
- _localThreadState->executingCurRun += _localThreadState->executing.markStopped();
- _threadsInUse.subtractAndFetch(1);
+ if (_localThreadState->recursionDepth++ == 0) {
+ _localThreadState->executing.markRunning();
+ _threadsInUse.addAndFetch(1);
}
- _totalExecuted.addAndFetch(1);
+ const auto guard = makeGuard([this, taskName] {
+ if (--_localThreadState->recursionDepth == 0) {
+ _localThreadState->executingCurRun +=
+ _localThreadState->executing.markStopped();
+ _threadsInUse.subtractAndFetch(1);
+ }
+ _totalExecuted.addAndFetch(1);
+ _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
+ ._totalExecuted.addAndFetch(1);
+ });
+
+ TickTimer _localTimer(_tickSource);
+ task();
_localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalExecuted.addAndFetch(1);
- });
-
- TickTimer _localTimer(_tickSource);
- task();
- _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalSpentExecuting.addAndFetch(_localTimer.sinceStartTicks());
- };
+ ._totalSpentExecuting.addAndFetch(_localTimer.sinceStartTicks());
+ };
// Dispatching a task on the io_context will run the task immediately, and may run it
// on the current thread (if the current thread is running the io_context right now).
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 70d234ad356..8e27d91549d 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -253,10 +253,10 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
}
/*
-* This tests that the executor will launch more threads when starvation is detected. We launch
-* another task from itself so there will always be a queue of a waiting task if there's just one
-* thread.
-*/
+ * This tests that the executor will launch more threads when starvation is detected. We launch
+ * another task from itself so there will always be a queue of a waiting task if there's just one
+ * thread.
+ */
TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
auto exec = makeAndStartExecutor<TestOptions>();
@@ -269,7 +269,6 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
std::function<void()> task;
task = [this, &task, &exec, &scheduleMutex, &scheduleNew] {
-
// This sleep needs to be larger than the sleep below to be able to limit the amount of
// starvation.
stdx::this_thread::sleep_for(config->maxQueueLatency().toSystemDuration() * 5);
@@ -304,9 +303,9 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
}
/*
-* This tests that the executor can execute tasks recursively. If it can't starvation will be
-* detected and new threads started.
-*/
+ * This tests that the executor can execute tasks recursively. If it can't starvation will be
+ * detected and new threads started.
+ */
TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
auto exec = makeAndStartExecutor<RecursionOptions>();
diff --git a/src/mongo/transport/service_executor_synchronous.cpp b/src/mongo/transport/service_executor_synchronous.cpp
index 4191899e763..79fc88e0033 100644
--- a/src/mongo/transport/service_executor_synchronous.cpp
+++ b/src/mongo/transport/service_executor_synchronous.cpp
@@ -115,7 +115,7 @@ Status ServiceExecutorSynchronous::schedule(Task task,
// into the thread local job queue.
LOG(3) << "Starting new executor thread in passthrough mode";
- Status status = launchServiceWorkerThread([ this, task = std::move(task) ] {
+ Status status = launchServiceWorkerThread([this, task = std::move(task)] {
_numRunningWorkerThreads.addAndFetch(1);
_localWorkQueue.emplace_back(std::move(task));
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index b6674f6b2c7..a7482e09f17 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -51,7 +51,7 @@ namespace {
constexpr Milliseconds kWorkerThreadRunTime{1000};
// Run time + generous scheduling time slice
const Milliseconds kShutdownTime = kWorkerThreadRunTime + Milliseconds{50};
-}
+} // namespace
struct TestOptions : public ServiceExecutorAdaptive::Options {
int reservedThreads() const final {
diff --git a/src/mongo/transport/service_state_machine.cpp b/src/mongo/transport/service_state_machine.cpp
index bffc44be413..6d4090037a5 100644
--- a/src/mongo/transport/service_state_machine.cpp
+++ b/src/mongo/transport/service_state_machine.cpp
@@ -555,7 +555,7 @@ void ServiceStateMachine::_scheduleNextWithGuard(ThreadGuard guard,
transport::ServiceExecutor::ScheduleFlags flags,
transport::ServiceExecutorTaskName taskName,
Ownership ownershipModel) {
- auto func = [ ssm = shared_from_this(), ownershipModel ] {
+ auto func = [ssm = shared_from_this(), ownershipModel] {
ThreadGuard guard(ssm.get());
if (ownershipModel == Ownership::kStatic)
guard.markStaticOwnership();
diff --git a/src/mongo/transport/service_state_machine_test.cpp b/src/mongo/transport/service_state_machine_test.cpp
index 4c96ea13b01..02447e5a289 100644
--- a/src/mongo/transport/service_state_machine_test.cpp
+++ b/src/mongo/transport/service_state_machine_test.cpp
@@ -259,8 +259,9 @@ public:
if (!_scheduleHook) {
return Status::OK();
} else {
- return _scheduleHook(std::move(task)) ? Status::OK() : Status{ErrorCodes::InternalError,
- "Hook returned error!"};
+ return _scheduleHook(std::move(task))
+ ? Status::OK()
+ : Status{ErrorCodes::InternalError, "Hook returned error!"};
}
}
@@ -497,10 +498,10 @@ TEST_F(ServiceStateMachineFixture, TestGetMoreWithExhaustAndEmptyResponseNamespa
Message getMoreWithExhaust = getMoreRequestWithExhaust(nss, cursorId, initRequestId);
// Construct a 'getMore' response with an empty namespace.
- BSONObj getMoreTerminalResBody = BSON("ok" << 1 << "cursor" << BSON("id" << 42 << "ns"
- << ""
- << "nextBatch"
- << BSONArray()));
+ BSONObj getMoreTerminalResBody = BSON("ok" << 1 << "cursor"
+ << BSON("id" << 42 << "ns"
+ << ""
+ << "nextBatch" << BSONArray()));
Message getMoreTerminalRes = buildOpMsg(getMoreTerminalResBody);
// Let the 'getMore' request be sourced from the network, processed in the database, and
@@ -794,7 +795,7 @@ TEST_F(ServiceStateMachineFixture, TerminateWorksForAllStatesWithScheduleFailure
waitFor = testState;
// This is a dummy thread that just advances the SSM while we track its state/kill it
- stdx::thread runner([ ssm = _ssm, &scheduleFailed ] {
+ stdx::thread runner([ssm = _ssm, &scheduleFailed] {
while (ssm->state() != State::Ended && !scheduleFailed) {
ssm->runNext();
}
diff --git a/src/mongo/transport/session.h b/src/mongo/transport/session.h
index 66fddd5d244..6f72aca8127 100644
--- a/src/mongo/transport/session.h
+++ b/src/mongo/transport/session.h
@@ -124,13 +124,13 @@ public:
virtual void cancelAsyncOperations(const BatonHandle& handle = nullptr) = 0;
/**
- * This should only be used to detect when the remote host has disappeared without
- * notice. It does NOT work correctly for ensuring that operations complete or fail
- * by some deadline.
- *
- * This timeout will only effect calls sourceMessage()/sinkMessage(). Async operations do not
- * currently support timeouts.
- */
+ * This should only be used to detect when the remote host has disappeared without
+ * notice. It does NOT work correctly for ensuring that operations complete or fail
+ * by some deadline.
+ *
+ * This timeout will only affect calls to sourceMessage()/sinkMessage(). Async operations do not
+ * currently support timeouts.
+ */
virtual void setTimeout(boost::optional<Milliseconds> timeout) = 0;
/**
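The comment above only promises timeout behavior for the blocking sourceMessage()/sinkMessage() path, and the test code later in this diff uses it exactly that way (setTimeout(Milliseconds{500}) before a sourceMessage() call). A self-contained sketch of that contract, with toy names standing in for MongoDB's transport types:

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>
#include <string>

// Toy illustration of the setTimeout() contract: an optional timeout applies
// only to the blocking receive path; no timeout means wait forever.
class ToySession {
public:
    void deliver(std::string msg) {
        std::lock_guard<std::mutex> lk(_mutex);
        _inbox.push(std::move(msg));
        _cv.notify_one();
    }
    void setTimeout(std::optional<std::chrono::milliseconds> timeout) {
        _timeout = timeout;
    }
    // Returns nullopt if nothing arrives within the timeout, i.e. the
    // "remote host disappeared without notice" case the comment describes.
    std::optional<std::string> sourceMessage() {
        std::unique_lock<std::mutex> lk(_mutex);
        auto ready = [&] { return !_inbox.empty(); };
        if (_timeout) {
            if (!_cv.wait_for(lk, *_timeout, ready))
                return std::nullopt;
        } else {
            _cv.wait(lk, ready);
        }
        auto msg = std::move(_inbox.front());
        _inbox.pop();
        return msg;
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    std::queue<std::string> _inbox;
    std::optional<std::chrono::milliseconds> _timeout;
};

int main() {
    ToySession s;
    s.setTimeout(std::chrono::milliseconds{50});
    return s.sourceMessage().has_value() ? 1 : 0;  // no sender, so this times out
}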
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 99a0804ecb8..484b11f3331 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -248,7 +248,6 @@ protected:
SSLPeerInfo::forSession(shared_from_this()) =
uassertStatusOK(getSSLManager()->parseAndValidatePeerCertificate(
_sslSocket->native_handle(), target.host(), target));
-
});
}
@@ -354,7 +353,7 @@ private:
auto headerBuffer = SharedBuffer::allocate(kHeaderSize);
auto ptr = headerBuffer.get();
return read(asio::buffer(ptr, kHeaderSize), baton)
- .then([ headerBuffer = std::move(headerBuffer), this, baton ]() mutable {
+ .then([headerBuffer = std::move(headerBuffer), this, baton]() mutable {
if (checkForHTTPRequest(asio::buffer(headerBuffer.get(), kHeaderSize))) {
return sendHTTPResponse(baton);
}
@@ -383,7 +382,7 @@ private:
MsgData::View msgView(buffer.get());
return read(asio::buffer(msgView.data(), msgView.dataLen()), baton)
- .then([ this, buffer = std::move(buffer), msgLen ]() mutable {
+ .then([this, buffer = std::move(buffer), msgLen]() mutable {
if (_isIngressSession) {
networkCounter.hitPhysicalIn(msgLen);
}
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 41f00e9b677..cad586f38d6 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -457,8 +457,9 @@ StatusWith<SessionHandle> TransportLayerASIO::connect(HostAndPort peer,
#else
auto globalSSLMode = _sslMode();
if (sslMode == kEnableSSL ||
- (sslMode == kGlobalSSLMode && ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
- (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
+ (sslMode == kGlobalSSLMode &&
+ ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
+ (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
auto sslStatus = session->handshakeSSLForEgress(peer).getNoThrow();
if (!sslStatus.isOK()) {
return sslStatus;
@@ -606,8 +607,9 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
#else
auto globalSSLMode = _sslMode();
if (sslMode == kEnableSSL ||
- (sslMode == kGlobalSSLMode && ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
- (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
+ (sslMode == kGlobalSSLMode &&
+ ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
+ (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
return connector->session
->handshakeSSLForEgressWithLock(std::move(lk), connector->peer)
.then([connector] { return Status::OK(); });
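The condition reflowed in both hunks encodes one decision: handshake SSL for egress when it is explicitly enabled for the connection, or when the connection defers to a global mode of preferSSL or requireSSL. Restated as a standalone predicate, with illustrative enum names standing in for SSLParams:

#include <cstdio>

enum class ConnectSSLMode { kDisableSSL, kEnableSSL, kGlobalSSLMode };
enum class GlobalSSLMode { disabled, allowSSL, preferSSL, requireSSL };

// Mirrors the if-condition in connect()/asyncConnect() above.
bool shouldHandshakeSSL(ConnectSSLMode sslMode, GlobalSSLMode globalMode) {
    return sslMode == ConnectSSLMode::kEnableSSL ||
        (sslMode == ConnectSSLMode::kGlobalSSLMode &&
         (globalMode == GlobalSSLMode::preferSSL ||
          globalMode == GlobalSSLMode::requireSSL));
}

int main() {
    std::printf("%d\n", shouldHandshakeSSL(ConnectSSLMode::kGlobalSSLMode,
                                           GlobalSSLMode::preferSSL));  // prints 1
}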
diff --git a/src/mongo/transport/transport_layer_asio_integration_test.cpp b/src/mongo/transport/transport_layer_asio_integration_test.cpp
index 718a0bd56a0..b9bfa10df5e 100644
--- a/src/mongo/transport/transport_layer_asio_integration_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_integration_test.cpp
@@ -65,9 +65,10 @@ TEST(TransportLayerASIO, HTTPRequestGetsHTTPError) {
log() << "Sending HTTP request";
std::string httpReq = str::stream() << "GET /\r\n"
"Host: "
- << server << "\r\n"
- "User-Agent: MongoDB Integration test\r\n"
- "Accept: */*";
+ << server
+ << "\r\n"
+ "User-Agent: MongoDB Integration test\r\n"
+ "Accept: */*";
asio::write(socket, asio::buffer(httpReq.data(), httpReq.size()));
log() << "Waiting for response";
diff --git a/src/mongo/transport/transport_layer_asio_test.cpp b/src/mongo/transport/transport_layer_asio_test.cpp
index bc995283097..08dcd99dcae 100644
--- a/src/mongo/transport/transport_layer_asio_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_test.cpp
@@ -236,7 +236,7 @@ public:
void startSession(transport::SessionHandle session) override {
log() << "Accepted connection from " << session->remote();
- startWorkerThread([ this, session = std::move(session) ]() mutable {
+ startWorkerThread([this, session = std::move(session)]() mutable {
log() << "waiting for message";
session->setTimeout(Milliseconds{500});
auto status = session->sourceMessage().getStatus();
@@ -332,7 +332,7 @@ class TimeoutSwitchModesSEP : public TimeoutSEP {
public:
void startSession(transport::SessionHandle session) override {
log() << "Accepted connection from " << session->remote();
- startWorkerThread([ this, session = std::move(session) ]() mutable {
+ startWorkerThread([this, session = std::move(session)]() mutable {
log() << "waiting for message";
auto sourceMessage = [&] { return session->sourceMessage().getStatus(); };
diff --git a/src/mongo/transport/transport_layer_egress_init.cpp b/src/mongo/transport/transport_layer_egress_init.cpp
index 062d0d284d8..8be5bd39735 100644
--- a/src/mongo/transport/transport_layer_egress_init.cpp
+++ b/src/mongo/transport/transport_layer_egress_init.cpp
@@ -44,7 +44,6 @@ namespace {
ServiceContext::ConstructorActionRegisterer registerEgressTransportLayer{
"ConfigureEgressTransportLayer", [](ServiceContext* sc) {
-
invariant(!sc->getTransportLayer());
transport::TransportLayerASIO::Options opts;
opts.mode = transport::TransportLayerASIO::Options::kEgress;
diff --git a/src/mongo/unittest/system_resource_canary_bm.cpp b/src/mongo/unittest/system_resource_canary_bm.cpp
index f2f09bc162b..945b46b7b22 100644
--- a/src/mongo/unittest/system_resource_canary_bm.cpp
+++ b/src/mongo/unittest/system_resource_canary_bm.cpp
@@ -94,7 +94,7 @@ class CacheLatencyTest : public benchmark::Fixture {
// Fixture for CPU Cache and RAM latency test. Adapted from lmbench's lat_mem_rd test.
public:
// Array of pointers used as a linked list.
- std::unique_ptr<char* []> data;
+ std::unique_ptr<char*[]> data;
void SetUp(benchmark::State& state) override {
if (state.thread_index == 0) {
@@ -107,7 +107,7 @@ public:
const int arrLength = state.range(0);
int counter = 0;
- data = std::make_unique<char* []>(arrLength);
+ data = std::make_unique<char*[]>(arrLength);
char** arr = data.get();
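The fixture above measures cache and RAM latency by chasing pointers through a char*[] array arranged as a linked list, the lmbench lat_mem_rd trick its comment cites. A minimal sketch of the setup and the dependent-load walk — every load depends on the previous one, so latency cannot be hidden by instruction-level parallelism (the real test also varies the stride to defeat hardware prefetching):

#include <memory>

int main() {
    const int n = 1 << 16;
    auto data = std::make_unique<char*[]>(n);
    char** arr = data.get();
    // Each slot points at the previous slot, forming a cycle.
    arr[0] = reinterpret_cast<char*>(&arr[n - 1]);
    for (int i = 1; i < n; ++i)
        arr[i] = reinterpret_cast<char*>(&arr[i - 1]);
    // Walk the chain: each dereference depends on the one before it.
    char** p = &arr[0];
    for (int i = 0; i < n; ++i)
        p = reinterpret_cast<char**>(*p);
    return p == &arr[0] ? 0 : 1;  // after n steps we are back at the start
}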
diff --git a/src/mongo/unittest/temp_dir.cpp b/src/mongo/unittest/temp_dir.cpp
index a4a91fa8a90..8b370dfd868 100644
--- a/src/mongo/unittest/temp_dir.cpp
+++ b/src/mongo/unittest/temp_dir.cpp
@@ -73,7 +73,7 @@ MONGO_INITIALIZER(SetTempDirDefaultRoot)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace
TempDir::TempDir(const std::string& namePrefix) {
fassert(17146, namePrefix.find_first_of("/\\") == std::string::npos);
diff --git a/src/mongo/unittest/unittest_helpers.cpp b/src/mongo/unittest/unittest_helpers.cpp
index 10eae6c2f3f..5e6a8627dcf 100644
--- a/src/mongo/unittest/unittest_helpers.cpp
+++ b/src/mongo/unittest/unittest_helpers.cpp
@@ -42,4 +42,4 @@ std::ostream& operator<<(std::ostream& s, const Timestamp& ot) {
s << ot.toString();
return s;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/alarm.h b/src/mongo/util/alarm.h
index aa46c01f924..449284a3b21 100644
--- a/src/mongo/util/alarm.h
+++ b/src/mongo/util/alarm.h
@@ -192,4 +192,4 @@ private:
AlarmMap _alarms;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/util/alarm_test.cpp b/src/mongo/util/alarm_test.cpp
index 1fc29b1b5f9..f450284fc2a 100644
--- a/src/mongo/util/alarm_test.cpp
+++ b/src/mongo/util/alarm_test.cpp
@@ -112,12 +112,12 @@ TEST(AlarmRunner, BasicTest) {
AtomicWord<bool> future2Filled{false};
auto pf = makePromiseFuture<void>();
- std::move(alarm2.future).getAsync([&future2Filled,
- promise = std::move(pf.promise) ](Status status) mutable {
- ASSERT_OK(status);
- future2Filled.store(true);
- promise.emplaceValue();
- });
+ std::move(alarm2.future)
+ .getAsync([&future2Filled, promise = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ future2Filled.store(true);
+ promise.emplaceValue();
+ });
clockSource->advance(Milliseconds(11));
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
index dcf2b580d42..cb9188590b7 100644
--- a/src/mongo/util/assert_util.cpp
+++ b/src/mongo/util/assert_util.cpp
@@ -241,12 +241,11 @@ Status exceptionToStatus() noexcept {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Caught std::exception of type " << demangleName(typeid(ex))
- << ": "
- << ex.what());
+ << ": " << ex.what());
} catch (const boost::exception& ex) {
- return Status(
- ErrorCodes::UnknownError,
- str::stream() << "Caught boost::exception of type " << demangleName(typeid(ex)) << ": "
+ return Status(ErrorCodes::UnknownError,
+ str::stream()
+ << "Caught boost::exception of type " << demangleName(typeid(ex)) << ": "
<< boost::diagnostic_information(ex));
} catch (...) {
@@ -254,4 +253,4 @@ Status exceptionToStatus() noexcept {
std::terminate();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/assert_util_test.cpp b/src/mongo/util/assert_util_test.cpp
index 25dcb159ba0..05b33ea3e4b 100644
--- a/src/mongo/util/assert_util_test.cpp
+++ b/src/mongo/util/assert_util_test.cpp
@@ -311,8 +311,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(false, msg);
}
@@ -326,8 +326,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantOverloadWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(Status(ErrorCodes::InternalError, "Terminating with invariant"), msg);
}
@@ -341,8 +341,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantStatusWithOverloadWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(StatusWith<std::string>(ErrorCodes::InternalError, "Terminating with invariant"),
msg);
}
@@ -367,8 +367,8 @@ DEATH_TEST(DassertTerminationTest,
DEATH_TEST(DassertTerminationTest,
dassertWithStdStringMsg,
"Terminating with std::string dassert message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string dassert message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string dassert message: " << 12345;
dassert(false, msg);
}
#endif // defined(MONGO_CONFIG_DEBUG_BUILD)
diff --git a/src/mongo/util/boost_assert_impl.cpp b/src/mongo/util/boost_assert_impl.cpp
index a541f7993da..f77ed7f2b0e 100644
--- a/src/mongo/util/boost_assert_impl.cpp
+++ b/src/mongo/util/boost_assert_impl.cpp
@@ -40,8 +40,11 @@ struct BoostAssertImpl {
invariantFailed(expr, file, line);
};
- BoostAssertFuncs::global().assertMsgFunc = [](
- char const* expr, char const* msg, char const* function, char const* file, long line) {
+ BoostAssertFuncs::global().assertMsgFunc = [](char const* expr,
+ char const* msg,
+ char const* function,
+ char const* file,
+ long line) {
invariantFailedWithMsg(expr, msg, file, line);
};
}
diff --git a/src/mongo/util/bson_util.h b/src/mongo/util/bson_util.h
index f4f3440318d..ab6c8459889 100644
--- a/src/mongo/util/bson_util.h
+++ b/src/mongo/util/bson_util.h
@@ -45,4 +45,4 @@ void bsonArrToNumVector(BSONElement el, std::vector<T>& results) {
results.push_back((T)el.Number());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/bufreader.h b/src/mongo/util/bufreader.h
index 90e270e7f90..8c30070bada 100644
--- a/src/mongo/util/bufreader.h
+++ b/src/mongo/util/bufreader.h
@@ -136,4 +136,4 @@ private:
const char* _pos;
const char* _end;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/checksum.h b/src/mongo/util/checksum.h
index c1e8aa73628..8d612c03e21 100644
--- a/src/mongo/util/checksum.h
+++ b/src/mongo/util/checksum.h
@@ -67,4 +67,4 @@ struct Checksum {
return words[0] != rhs.words[0] || words[1] != rhs.words[1];
}
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/clock_source_mock_test.cpp b/src/mongo/util/clock_source_mock_test.cpp
index 3861dbae4c5..f8dbdc343df 100644
--- a/src/mongo/util/clock_source_mock_test.cpp
+++ b/src/mongo/util/clock_source_mock_test.cpp
@@ -129,11 +129,10 @@ TEST(ClockSourceMockTest, AlarmScheudlesExpiredAlarmWhenSignaled) {
ClockSourceMock cs;
const auto beginning = cs.now();
int alarmFiredCount = 0;
- ASSERT_OK(cs.setAlarm(beginning + Seconds{1},
- [&] {
- ++alarmFiredCount;
- ASSERT_OK(cs.setAlarm(beginning, [&] { ++alarmFiredCount; }));
- }));
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{1}, [&] {
+ ++alarmFiredCount;
+ ASSERT_OK(cs.setAlarm(beginning, [&] { ++alarmFiredCount; }));
+ }));
ASSERT_EQ(0, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(2, alarmFiredCount);
@@ -154,17 +153,15 @@ TEST(ClockSourceMockTest, AlarmScheudlesAlarmWhenSignaled) {
ClockSourceMock cs;
const auto beginning = cs.now();
int alarmFiredCount = 0;
- ASSERT_OK(cs.setAlarm(beginning + Seconds{1},
- [&] {
- ++alarmFiredCount;
- ASSERT_OK(
- cs.setAlarm(beginning + Seconds{2}, [&] { ++alarmFiredCount; }));
- }));
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{1}, [&] {
+ ++alarmFiredCount;
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{2}, [&] { ++alarmFiredCount; }));
+ }));
ASSERT_EQ(0, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(1, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(2, alarmFiredCount);
}
-}
+} // namespace
} // namespace mongo
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline.cpp b/src/mongo/util/cmdline_utils/censor_cmdline.cpp
index 746daf24b2d..526414b21e9 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline.cpp
+++ b/src/mongo/util/cmdline_utils/censor_cmdline.cpp
@@ -233,4 +233,4 @@ void censorArgvArray(int argc, char** argv) {
}
}
} // namespace cmdline_utils
-}
+} // namespace mongo
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline.h b/src/mongo/util/cmdline_utils/censor_cmdline.h
index df9ef9d1729..806b5d1ef60 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline.h
+++ b/src/mongo/util/cmdline_utils/censor_cmdline.h
@@ -46,4 +46,4 @@ void censorArgsVector(std::vector<std::string>* args);
void censorBSONObj(BSONObj* params);
} // namespace cmdline_utils
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/idle_thread_block.cpp b/src/mongo/util/concurrency/idle_thread_block.cpp
index 2886a24edd7..64426a47774 100644
--- a/src/mongo/util/concurrency/idle_thread_block.cpp
+++ b/src/mongo/util/concurrency/idle_thread_block.cpp
@@ -36,7 +36,7 @@ namespace mongo {
namespace for_debuggers {
// This needs external linkage to ensure that debuggers can use it.
thread_local const char* idleThreadLocation = nullptr;
-}
+} // namespace for_debuggers
using for_debuggers::idleThreadLocation;
void IdleThreadBlock::beginIdleThreadBlock(const char* location) {
@@ -48,4 +48,4 @@ void IdleThreadBlock::endIdleThreadBlock() {
invariant(idleThreadLocation);
idleThreadLocation = nullptr;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index 30ee48f8a30..f252fe14cd8 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -44,7 +44,7 @@ namespace mongo {
* timeout). Thus it can be implemented using OS-specific
* facilities in all environments (if desired). On Windows,
* the implementation below is faster than boost mutex.
-*/
+ */
#if defined(_WIN32)
class SimpleMutex {
diff --git a/src/mongo/util/concurrency/thread_name.cpp b/src/mongo/util/concurrency/thread_name.cpp
index 7aa58a3b6f4..03a6ab181c4 100644
--- a/src/mongo/util/concurrency/thread_name.cpp
+++ b/src/mongo/util/concurrency/thread_name.cpp
@@ -145,8 +145,8 @@ void setThreadName(StringData name) {
// limit, it's best to shorten long names.
int error = 0;
if (threadName.size() > 15) {
- std::string shortName = str::stream() << threadName.substr(0, 7) << '.'
- << threadName.substr(threadName.size() - 7);
+ std::string shortName = str::stream()
+ << threadName.substr(0, 7) << '.' << threadName.substr(threadName.size() - 7);
error = pthread_setname_np(pthread_self(), shortName.c_str());
} else {
error = pthread_setname_np(pthread_self(), threadName.rawData());
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 69e4e0f64b3..fd8d23377ea 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -173,8 +173,8 @@ void ThreadPool::_drainPendingTasks() {
// Tasks cannot be run inline because they can create OperationContexts and the join() caller
// may already have one associated with the thread.
stdx::thread cleanThread = stdx::thread([&] {
- const std::string threadName = str::stream() << _options.threadNamePrefix
- << _nextThreadId++;
+ const std::string threadName = str::stream()
+ << _options.threadNamePrefix << _nextThreadId++;
setThreadName(threadName);
_options.onCreateThread(threadName);
stdx::unique_lock<stdx::mutex> lock(_mutex);
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index d836f977b67..e30746807ae 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -137,8 +137,7 @@ Status TicketHolder::resize(int newSize) {
if (newSize > SEM_VALUE_MAX)
return Status(ErrorCodes::BadValue,
str::stream() << "Maximum value for semaphore is " << SEM_VALUE_MAX
- << "; given "
- << newSize);
+ << "; given " << newSize);
while (_outof.load() < newSize) {
release();
@@ -254,4 +253,4 @@ bool TicketHolder::_tryAcquire() {
return true;
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/value.h b/src/mongo/util/concurrency/value.h
index b2759be742a..a0a03d9d260 100644
--- a/src/mongo/util/concurrency/value.h
+++ b/src/mongo/util/concurrency/value.h
@@ -79,4 +79,4 @@ public:
// multiple operations
bool operator==(const std::string& s) const;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/debugger.cpp b/src/mongo/util/debugger.cpp
index 16a319e87f3..53cbeedbe09 100644
--- a/src/mongo/util/debugger.cpp
+++ b/src/mongo/util/debugger.cpp
@@ -122,4 +122,4 @@ void setupSIGTRAPforGDB() {
#else
void setupSIGTRAPforGDB() {}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/decimal_counter.h b/src/mongo/util/decimal_counter.h
index a85d23f3dd9..b090989792f 100644
--- a/src/mongo/util/decimal_counter.h
+++ b/src/mongo/util/decimal_counter.h
@@ -101,4 +101,4 @@ private:
uint8_t _lastDigitIndex = 0; // Indicates the last digit in _digits.
T _counter = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/diagnostic_info.cpp b/src/mongo/util/diagnostic_info.cpp
index 1a19f29ac8d..fbf84aff8cf 100644
--- a/src/mongo/util/diagnostic_info.cpp
+++ b/src/mongo/util/diagnostic_info.cpp
@@ -121,7 +121,7 @@ public:
DiagnosticInfo::StackFrame getFrame(void* instructionPtr) const {
auto it = --_map.upper_bound(instructionPtr);
- auto & [ objectPtr, frame ] = *it;
+ auto& [objectPtr, frame] = *it;
ptrdiff_t instructionOffset =
static_cast<char*>(instructionPtr) - static_cast<char*>(objectPtr);
return DiagnosticInfo::StackFrame{
diff --git a/src/mongo/util/diagnostic_info.h b/src/mongo/util/diagnostic_info.h
index 160ec11e40c..ebfb50885aa 100644
--- a/src/mongo/util/diagnostic_info.h
+++ b/src/mongo/util/diagnostic_info.h
@@ -94,4 +94,4 @@ private:
* Captures the diagnostic information based on the caller's context.
*/
DiagnosticInfo takeDiagnosticInfo(const StringData& captureName);
-} // namespace monogo
+} // namespace mongo
diff --git a/src/mongo/util/diagnostic_info_test.cpp b/src/mongo/util/diagnostic_info_test.cpp
index c12155b98d2..9c9b9524a38 100644
--- a/src/mongo/util/diagnostic_info_test.cpp
+++ b/src/mongo/util/diagnostic_info_test.cpp
@@ -59,4 +59,4 @@ TEST(DiagnosticInfo, BasicSingleThread) {
clockSourcePointer->advance(Seconds(1));
ASSERT_LT(capture2.getTimestamp(), clockSourcePointer->now());
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/dns_name.h b/src/mongo/util/dns_name.h
index 023bee10516..8b913cadd21 100644
--- a/src/mongo/util/dns_name.h
+++ b/src/mongo/util/dns_name.h
@@ -402,7 +402,7 @@ private:
void streamCore(StreamLike& os) const {
std::for_each(rbegin(_nameComponents),
rend(_nameComponents),
- [ first = true, &os ](const auto& component) mutable {
+ [first = true, &os](const auto& component) mutable {
if (!first)
os << '.';
first = false;
@@ -439,7 +439,7 @@ private:
// FQDNs and Relative Names are discriminated by this field.
Qualification fullyQualified;
};
-} // detail_dns_host_name
+} // namespace detail_dns_host_name
// The `operator==` function has to be defined out-of-line, because it uses `make_equality_lens`
// which is an auto-deduced return type function defined later in the class body.
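The streamCore() hunk above joins name components with dots using a mutable init-capture as a first-element flag. The same pattern in miniature:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> components{"cc", "10gen", "build", "test"};
    // Reverse iteration matches streamCore(): components are stored
    // root-first, but print leaf-first.
    std::for_each(components.rbegin(), components.rend(),
                  [first = true](const std::string& c) mutable {
                      if (!first)
                          std::cout << '.';
                      first = false;
                      std::cout << c;
                  });
    std::cout << '\n';  // prints: test.build.10gen.cc
}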
diff --git a/src/mongo/util/dns_query_test.cpp b/src/mongo/util/dns_query_test.cpp
index b7dac331a12..8c6330b1557 100644
--- a/src/mongo/util/dns_query_test.cpp
+++ b/src/mongo/util/dns_query_test.cpp
@@ -110,11 +110,13 @@ TEST(MongoDnsQuery, srvRecords) {
} tests[] = {
{"test1.test.build.10gen.cc.",
{
- {"localhost.test.build.10gen.cc.", 27017}, {"localhost.test.build.10gen.cc.", 27018},
+ {"localhost.test.build.10gen.cc.", 27017},
+ {"localhost.test.build.10gen.cc.", 27018},
}},
{"test2.test.build.10gen.cc.",
{
- {"localhost.test.build.10gen.cc.", 27018}, {"localhost.test.build.10gen.cc.", 27019},
+ {"localhost.test.build.10gen.cc.", 27018},
+ {"localhost.test.build.10gen.cc.", 27019},
}},
{"test3.test.build.10gen.cc.",
{
@@ -174,7 +176,8 @@ TEST(MongoDnsQuery, txtRecords) {
}},
{"test6.test.build.10gen.cc",
{
- "authSource=otherDB", "replicaSet=repl0",
+ "authSource=otherDB",
+ "replicaSet=repl0",
}},
};
diff --git a/src/mongo/util/exception_filter_win32.cpp b/src/mongo/util/exception_filter_win32.cpp
index 5f404d2bf8a..b3a6ebec0b6 100644
--- a/src/mongo/util/exception_filter_win32.cpp
+++ b/src/mongo/util/exception_filter_win32.cpp
@@ -178,7 +178,7 @@ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS* excPointers) {
// We won't reach here
return EXCEPTION_EXECUTE_HANDLER;
}
-}
+} // namespace
LPTOP_LEVEL_EXCEPTION_FILTER filtLast = 0;
@@ -192,6 +192,6 @@ void setWindowsUnhandledExceptionFilter() {
namespace mongo {
void setWindowsUnhandledExceptionFilter() {}
-}
+} // namespace mongo
#endif // _WIN32
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index b08545f8c2d..b92b59253ea 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -114,8 +114,9 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
ExitCode originallyRequestedCode = shutdownExitCode.get();
if (code != originallyRequestedCode) {
log() << "While running shutdown tasks with the intent to exit with code "
- << originallyRequestedCode << ", an additional shutdown request arrived with "
- "the intent to exit with a different exit code "
+ << originallyRequestedCode
+ << ", an additional shutdown request arrived with "
+ "the intent to exit with a different exit code "
<< code << "; ignoring the conflicting exit code";
}
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index 9582a0e4939..d5af18f318f 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -267,4 +267,4 @@ BSONObj FailPoint::toBSON() const {
return builder.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 2a759a5216a..0a32ec45777 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -43,10 +43,10 @@
#include "mongo/util/log.h"
#include "mongo/util/time_support.h"
-using mongo::getGlobalFailPointRegistry;
using mongo::BSONObj;
using mongo::FailPoint;
using mongo::FailPointEnableBlock;
+using mongo::getGlobalFailPointRegistry;
namespace stdx = mongo::stdx;
namespace mongo_test {
@@ -398,8 +398,7 @@ TEST(FailPoint, parseBSONInvalidDataFails) {
TEST(FailPoint, parseBSONValidDataSucceeds) {
auto swTuple = FailPoint::parseBSON(BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("a" << 1)));
+ << "data" << BSON("a" << 1)));
ASSERT_TRUE(swTuple.isOK());
}
@@ -448,4 +447,4 @@ TEST(FailPoint, FailPointBlockIfBasicTest) {
ASSERT(!"shouldn't get here");
}
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/util/file.cpp b/src/mongo/util/file.cpp
index 9096a11b23e..b3c85b4c34e 100644
--- a/src/mongo/util/file.cpp
+++ b/src/mongo/util/file.cpp
@@ -139,12 +139,8 @@ void File::read(fileofs o, char* data, unsigned len) {
_bad = true;
msgasserted(10438,
str::stream() << "In File::read(), ReadFile for '" << _name << "' read "
- << bytesRead
- << " bytes while trying to read "
- << len
- << " bytes starting at offset "
- << o
- << ", truncated file?");
+ << bytesRead << " bytes while trying to read " << len
+ << " bytes starting at offset " << o << ", truncated file?");
}
}
@@ -242,8 +238,7 @@ void File::open(const char* filename, bool readOnly, bool direct) {
_fd = ::open(filename,
(readOnly ? O_RDONLY : (O_CREAT | O_RDWR | O_NOATIME))
#if defined(O_DIRECT)
- |
- (direct ? O_DIRECT : 0)
+ | (direct ? O_DIRECT : 0)
#endif
,
S_IRUSR | S_IWUSR);
@@ -264,12 +259,8 @@ void File::read(fileofs o, char* data, unsigned len) {
_bad = true;
msgasserted(16569,
str::stream() << "In File::read(), ::pread for '" << _name << "' read "
- << bytesRead
- << " bytes while trying to read "
- << len
- << " bytes starting at offset "
- << o
- << ", truncated file?");
+ << bytesRead << " bytes while trying to read " << len
+ << " bytes starting at offset " << o << ", truncated file?");
}
}
@@ -297,4 +288,4 @@ void File::write(fileofs o, const char* data, unsigned len) {
}
#endif // _WIN32
-}
+} // namespace mongo
diff --git a/src/mongo/util/file.h b/src/mongo/util/file.h
index 6676ee21bd4..10511465142 100644
--- a/src/mongo/util/file.h
+++ b/src/mongo/util/file.h
@@ -69,4 +69,4 @@ private:
#endif
std::string _name;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/future.h b/src/mongo/util/future.h
index f46f3c541e1..5bef6ff3209 100644
--- a/src/mongo/util/future.h
+++ b/src/mongo/util/future.h
@@ -1131,7 +1131,7 @@ NOINLINE_DECL auto ExecutorFuture<T>::wrapCBHelper(unique_function<Sig>&& func)
exec = _exec // can't move this!
](auto&&... args) mutable noexcept
->Future<UnwrappedType<decltype(func(std::forward<decltype(args)>(args)...))>> {
- auto[promise, future] = makePromiseFuture<
+ auto [promise, future] = makePromiseFuture<
UnwrappedType<decltype(func(std::forward<decltype(args)>(args)...))>>();
exec->schedule([
diff --git a/src/mongo/util/future_impl.h b/src/mongo/util/future_impl.h
index fe810446392..5650cf9fb90 100644
--- a/src/mongo/util/future_impl.h
+++ b/src/mongo/util/future_impl.h
@@ -1069,8 +1069,7 @@ public:
// TODO in C++17 with constexpr if this can be done cleaner and more efficiently by not
// throwing.
- return std::move(*this).onError([func =
- std::forward<Func>(func)](Status && status) mutable {
+ return std::move(*this).onError([func = std::forward<Func>(func)](Status&& status) mutable {
if (status != code)
uassertStatusOK(status);
return throwingCall(func, std::move(status));
@@ -1087,8 +1086,7 @@ public:
if (_immediate || (isReady() && _shared->status.isOK()))
return std::move(*this);
- return std::move(*this).onError([func =
- std::forward<Func>(func)](Status && status) mutable {
+ return std::move(*this).onError([func = std::forward<Func>(func)](Status&& status) mutable {
if (!ErrorCodes::isA<category>(status))
uassertStatusOK(status);
return throwingCall(func, std::move(status));
@@ -1110,9 +1108,8 @@ public:
static_assert(std::is_void<decltype(call(func, std::declval<const Status&>()))>::value,
"func passed to tapError must return void");
- return tapImpl(std::forward<Func>(func),
- [](Func && func, const T& val) noexcept {},
- [](Func && func, const Status& status) noexcept { call(func, status); });
+ return tapImpl(std::forward<Func>(func), [](Func && func, const T& val) noexcept {}, [
+ ](Func && func, const Status& status) noexcept { call(func, status); });
}
template <typename Func>
diff --git a/src/mongo/util/future_test_edge_cases.cpp b/src/mongo/util/future_test_edge_cases.cpp
index 53b4a837e9e..b81a049f94f 100644
--- a/src/mongo/util/future_test_edge_cases.cpp
+++ b/src/mongo/util/future_test_edge_cases.cpp
@@ -323,7 +323,7 @@ TEST(Future_EdgeCases, Racing_SharedPromise_getFuture_and_setError) {
TEST(Future_EdgeCases, SharedPromise_CompleteWithUnreadyFuture) {
SharedSemiFuture<void> sf;
- auto[promise, future] = makePromiseFuture<void>();
+ auto [promise, future] = makePromiseFuture<void>();
{
SharedPromise<void> sp;
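The only change in this hunk is the space clang-format now emits after auto in a structured binding. For readers unfamiliar with the pattern, a standard-library analog of makePromiseFuture<T>() unpacked the same way — std::promise/std::future stand in for MongoDB's own types:

#include <future>
#include <utility>

// Build a linked promise/future pair and return both, to be unpacked
// with a structured binding at the call site.
template <typename T>
std::pair<std::promise<T>, std::future<T>> makePromiseFuturePair() {
    std::promise<T> p;
    std::future<T> f = p.get_future();
    return {std::move(p), std::move(f)};
}

int main() {
    auto [promise, future] = makePromiseFuturePair<int>();
    promise.set_value(42);
    return future.get() == 42 ? 0 : 1;
}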
diff --git a/src/mongo/util/future_test_executor_future.cpp b/src/mongo/util/future_test_executor_future.cpp
index 564d0e69cda..1c6dc09224c 100644
--- a/src/mongo/util/future_test_executor_future.cpp
+++ b/src/mongo/util/future_test_executor_future.cpp
@@ -37,36 +37,34 @@
namespace mongo {
namespace {
TEST(Executor_Future, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto exec = InlineCountingExecutor::make();
- auto pf = makePromiseFuture<void>();
- ExecutorFuture<void>(exec).thenRunOn(exec).getAsync([outside = std::move(pf.promise)](
- Status status) mutable {
- ASSERT_OK(status);
- outside.emplaceValue();
- });
- ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
- ASSERT_EQ(exec->tasksRun.load(), 1);
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto exec = InlineCountingExecutor::make();
+ auto pf = makePromiseFuture<void>();
+ ExecutorFuture<void>(exec).thenRunOn(exec).getAsync(
+ [outside = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ outside.emplaceValue();
+ });
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
+ ASSERT_EQ(exec->tasksRun.load(), 1);
+ });
}
TEST(Executor_Future, Reject_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto exec = RejectingExecutor::make();
- auto pf = makePromiseFuture<void>();
- std::move(fut).thenRunOn(exec).getAsync([promise = std::move(pf.promise)](
- Status status) mutable {
- promise.emplaceValue(); // shouldn't be run anyway.
- FAIL("how did I run!?!?!");
- });
-
- // Promise is destroyed without calling the callback.
- ASSERT_EQ(std::move(pf.future).getNoThrow(), ErrorCodes::BrokenPromise);
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto exec = RejectingExecutor::make();
+ auto pf = makePromiseFuture<void>();
+ std::move(fut).thenRunOn(exec).getAsync(
+ [promise = std::move(pf.promise)](Status status) mutable {
+ promise.emplaceValue(); // shouldn't be run anyway.
+ FAIL("how did I run!?!?!");
+ });
+
+ // Promise is destroyed without calling the callback.
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), ErrorCodes::BrokenPromise);
+ });
}
TEST(Executor_Future, Success_then) {
diff --git a/src/mongo/util/future_test_future_int.cpp b/src/mongo/util/future_test_future_int.cpp
index 60691aca811..96023b210d3 100644
--- a/src/mongo/util/future_test_future_int.cpp
+++ b/src/mongo/util/future_test_future_int.cpp
@@ -77,16 +77,16 @@ TEST(Future, Success_semi_get) {
}
TEST(Future, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] { return 1; },
- [](/*Future<int>*/ auto&& fut) {
- auto pf = makePromiseFuture<int>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](StatusWith<int> sw) mutable {
- ASSERT_OK(sw);
- outside.emplaceValue(sw.getValue());
- });
- ASSERT_EQ(std::move(pf.future).get(), 1);
- });
+ FUTURE_SUCCESS_TEST([] { return 1; },
+ [](/*Future<int>*/ auto&& fut) {
+ auto pf = makePromiseFuture<int>();
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](StatusWith<int> sw) mutable {
+ ASSERT_OK(sw);
+ outside.emplaceValue(sw.getValue());
+ });
+ ASSERT_EQ(std::move(pf.future).get(), 1);
+ });
}
TEST(Future, Fail_getLvalue) {
@@ -144,7 +144,6 @@ TEST(Future, Success_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_EQ(status, 1);
});
-
});
}
@@ -157,7 +156,6 @@ TEST(Future, Fail_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_NOT_OK(status);
});
-
});
}
diff --git a/src/mongo/util/future_test_future_move_only.cpp b/src/mongo/util/future_test_future_move_only.cpp
index 5c03813679b..7fd124b61c5 100644
--- a/src/mongo/util/future_test_future_move_only.cpp
+++ b/src/mongo/util/future_test_future_move_only.cpp
@@ -130,11 +130,11 @@ TEST(Future_MoveOnly, Success_getAsync) {
FUTURE_SUCCESS_TEST([] { return Widget(1); },
[](/*Future<Widget>*/ auto&& fut) {
auto pf = makePromiseFuture<Widget>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](
- StatusWith<Widget> sw) mutable {
- ASSERT_OK(sw);
- outside.emplaceValue(std::move(sw.getValue()));
- });
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](StatusWith<Widget> sw) mutable {
+ ASSERT_OK(sw);
+ outside.emplaceValue(std::move(sw.getValue()));
+ });
ASSERT_EQ(std::move(pf.future).get(), 1);
});
}
diff --git a/src/mongo/util/future_test_future_void.cpp b/src/mongo/util/future_test_future_void.cpp
index 5281d1a15d3..c9e9f5dfa51 100644
--- a/src/mongo/util/future_test_future_void.cpp
+++ b/src/mongo/util/future_test_future_void.cpp
@@ -73,16 +73,16 @@ TEST(Future_Void, Success_semi_get) {
}
TEST(Future_Void, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto pf = makePromiseFuture<void>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](Status status) mutable {
- ASSERT_OK(status);
- outside.emplaceValue();
- });
- ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto pf = makePromiseFuture<void>();
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ outside.emplaceValue();
+ });
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
+ });
}
TEST(Future_Void, Fail_getLvalue) {
@@ -141,7 +141,6 @@ TEST(Future_Void, Success_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_OK(status);
});
-
});
}
@@ -154,7 +153,6 @@ TEST(Future_Void, Fail_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_NOT_OK(status);
});
-
});
}
diff --git a/src/mongo/util/future_test_shared_future.cpp b/src/mongo/util/future_test_shared_future.cpp
index 12914422412..5afb3f6d112 100644
--- a/src/mongo/util/future_test_shared_future.cpp
+++ b/src/mongo/util/future_test_shared_future.cpp
@@ -67,7 +67,8 @@ TEST(SharedFuture, isReady_shared_TSAN_OK) {
auto fut = async([&] {
done = true;
return 1;
- }).share();
+ })
+ .share();
//(void)*const_cast<volatile bool*>(&done); // Data Race! Uncomment to make sure TSAN works.
while (!fut.isReady()) {
}
@@ -183,7 +184,7 @@ TEST(SharedFuture, NoStackOverflow_Destruction) {
// Add 100 children that each use 100K of stack space on destruction.
for (int i = 0; i < 100; i++) {
collector.push_back(
- shared.thenRunOn(exec).then([x = Evil()]{}).semi());
+ shared.thenRunOn(exec).then([x = Evil()] {}).semi());
}
for (auto&& collected : collector) {
diff --git a/src/mongo/util/future_test_utils.h b/src/mongo/util/future_test_utils.h
index 77451f837ff..d4189f28efc 100644
--- a/src/mongo/util/future_test_utils.h
+++ b/src/mongo/util/future_test_utils.h
@@ -131,14 +131,15 @@ template <typename Func, typename Result = std::result_of_t<Func && ()>>
Future<Result> async(Func&& func) {
auto pf = makePromiseFuture<Result>();
- stdx::thread([ promise = std::move(pf.promise), func = std::forward<Func>(func) ]() mutable {
+ stdx::thread([promise = std::move(pf.promise), func = std::forward<Func>(func)]() mutable {
sleepIfShould();
try {
completePromise(&promise, func);
} catch (const DBException& ex) {
promise.setError(ex.toStatus());
}
- }).detach();
+ })
+ .detach();
return std::move(pf.future);
}
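The async() helper above is the classic pattern of moving a promise into a detached worker thread and handing the paired future back to the caller. A standard-library rendition of the same idea, under the assumption that std::promise/std::future may stand in for MongoDB's Promise/Future and DBException:

#include <future>
#include <thread>
#include <utility>

template <typename Func>
auto asyncToy(Func&& func) -> std::future<decltype(func())> {
    std::promise<decltype(func())> promise;
    auto future = promise.get_future();
    // Move the promise into the worker; the thread owns it from here on.
    std::thread([promise = std::move(promise),
                 func = std::forward<Func>(func)]() mutable {
        try {
            promise.set_value(func());
        } catch (...) {
            promise.set_exception(std::current_exception());
        }
    }).detach();
    return future;
}

int main() {
    return asyncToy([] { return 7; }).get() == 7 ? 0 : 1;
}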
diff --git a/src/mongo/util/hex.cpp b/src/mongo/util/hex.cpp
index 1bbf362ada9..41255966894 100644
--- a/src/mongo/util/hex.cpp
+++ b/src/mongo/util/hex.cpp
@@ -107,4 +107,4 @@ std::string hexdump(const char* data, unsigned len) {
std::string s = ss.str();
return s;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/hex.h b/src/mongo/util/hex.h
index d58364fe54f..b01cb9b9336 100644
--- a/src/mongo/util/hex.h
+++ b/src/mongo/util/hex.h
@@ -129,4 +129,4 @@ std::string unsignedIntToFixedLengthHex(uint32_t val);
/* @return a dump of the buffer as hex byte ascii output */
std::string hexdump(const char* data, unsigned len);
-}
+} // namespace mongo
diff --git a/src/mongo/util/if_constexpr.h b/src/mongo/util/if_constexpr.h
index bf1dbdf8867..28900a243a1 100644
--- a/src/mongo/util/if_constexpr.h
+++ b/src/mongo/util/if_constexpr.h
@@ -31,6 +31,4 @@
// Terrible hack to work around clang-format being out of date.
// TODO sed this away and delete this file when we upgrade clang-format.
-#define IF_CONSTEXPR \
- if \
- constexpr
+#define IF_CONSTEXPR if constexpr
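With the macro reduced to a plain token expansion, call sites presumably read like ordinary if constexpr statements. A hypothetical usage sketch (the describe() function is illustrative, not from the MongoDB tree):

#include <string>
#include <type_traits>

#define IF_CONSTEXPR if constexpr  // as defined above

template <typename T>
std::string describe(const T& v) {
    IF_CONSTEXPR(std::is_integral<T>::value) {
        // The other branch is discarded at compile time, so std::to_string
        // is never instantiated for non-integral T.
        return "integral: " + std::to_string(v);
    }
    else {
        return "something else";
    }
}

int main() {
    return describe(7) == "integral: 7" ? 0 : 1;
}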
diff --git a/src/mongo/util/intrusive_counter.cpp b/src/mongo/util/intrusive_counter.cpp
index 59b177a1fc3..e33cbc87da5 100644
--- a/src/mongo/util/intrusive_counter.cpp
+++ b/src/mongo/util/intrusive_counter.cpp
@@ -39,8 +39,7 @@ using boost::intrusive_ptr;
intrusive_ptr<const RCString> RCString::create(StringData s) {
uassert(16493,
str::stream() << "Tried to create string longer than "
- << (BSONObjMaxUserSize / 1024 / 1024)
- << "MB",
+ << (BSONObjMaxUserSize / 1024 / 1024) << "MB",
s.size() < static_cast<size_t>(BSONObjMaxUserSize));
const size_t sizeWithNUL = s.size() + 1;
diff --git a/src/mongo/util/log.h b/src/mongo/util/log.h
index 68a16c34493..62441644344 100644
--- a/src/mongo/util/log.h
+++ b/src/mongo/util/log.h
@@ -174,15 +174,15 @@ inline bool shouldLog(logger::LogSeverity severity) {
} // namespace
// MONGO_LOG uses log component from MongoLogDefaultComponent from current or global namespace.
-#define MONGO_LOG(DLEVEL) \
- if (!(::mongo::logger::globalLogDomain()) \
- ->shouldLog(MongoLogDefaultComponent_component, \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- MongoLogDefaultComponent_component)
+#define MONGO_LOG(DLEVEL) \
+ if (!(::mongo::logger::globalLogDomain()) \
+ ->shouldLog(MongoLogDefaultComponent_component, \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ MongoLogDefaultComponent_component)
#define LOG MONGO_LOG
@@ -190,32 +190,32 @@ inline bool shouldLog(logger::LogSeverity severity) {
if (!(::mongo::logger::globalLogDomain()) \
->shouldLog((COMPONENT1), ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
} else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
#define MONGO_LOG_COMPONENT2(DLEVEL, COMPONENT1, COMPONENT2) \
if (!(::mongo::logger::globalLogDomain()) \
->shouldLog( \
(COMPONENT1), (COMPONENT2), ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
} else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
-
-#define MONGO_LOG_COMPONENT3(DLEVEL, COMPONENT1, COMPONENT2, COMPONENT3) \
- if (!(::mongo::logger::globalLogDomain()) \
- ->shouldLog((COMPONENT1), \
- (COMPONENT2), \
- (COMPONENT3), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
+
+#define MONGO_LOG_COMPONENT3(DLEVEL, COMPONENT1, COMPONENT2, COMPONENT3) \
+ if (!(::mongo::logger::globalLogDomain()) \
+ ->shouldLog((COMPONENT1), \
+ (COMPONENT2), \
+ (COMPONENT3), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
/**
* Rotates the log files. Returns true if all logs rotate successfully.
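These macros lean on the classic if/else logging idiom: a disabled severity hits the empty if-body, so the streaming expression after else is never evaluated, and the full if/else shape keeps the macro safe inside a surrounding if statement. A minimal reproduction with toy names:

#include <iostream>

static int g_logLevel = 1;  // toy severity threshold

#define TOY_LOG(LEVEL)          \
    if ((LEVEL) > g_logLevel) { \
    } else                      \
        std::cout

int main() {
    TOY_LOG(0) << "visible\n";     // 0 <= 1: streamed to stdout
    TOY_LOG(2) << "suppressed\n";  // 2 > 1: stream operands never evaluated
}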
diff --git a/src/mongo/util/log_and_backoff.cpp b/src/mongo/util/log_and_backoff.cpp
index e890f86a9ca..3438b4b23b7 100644
--- a/src/mongo/util/log_and_backoff.cpp
+++ b/src/mongo/util/log_and_backoff.cpp
@@ -40,8 +40,8 @@ void logAndBackoff(logger::LogComponent logComponent,
logger::LogSeverity logLevel,
size_t numAttempts,
StringData message) {
- MONGO_LOG_COMPONENT(logLevel, logComponent) << message
- << ". Retrying, attempt: " << numAttempts;
+ MONGO_LOG_COMPONENT(logLevel, logComponent)
+ << message << ". Retrying, attempt: " << numAttempts;
if (numAttempts < 4) {
// no-op
diff --git a/src/mongo/util/lru_cache_test.cpp b/src/mongo/util/lru_cache_test.cpp
index a8772bc5a93..b88e4297e46 100644
--- a/src/mongo/util/lru_cache_test.cpp
+++ b/src/mongo/util/lru_cache_test.cpp
@@ -277,7 +277,6 @@ TEST(LRUCacheTest, SizeOneCache) {
// Test cache eviction when the cache is full and new elements are added.
TEST(LRUCacheTest, EvictionTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test eviction for any permutation of the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
@@ -309,7 +308,6 @@ TEST(LRUCacheTest, EvictionTest) {
// from any original position in the cache.
TEST(LRUCacheTest, PromoteTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test promotion for any position in the original cache
// i <= maxSize here, so we test promotion of cache.end(),
// and of a non-existent key.
@@ -354,7 +352,6 @@ TEST(LRUCacheTest, PromoteTest) {
// the existing entry and gets promoted properly
TEST(LRUCacheTest, ReplaceKeyTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
@@ -378,7 +375,6 @@ TEST(LRUCacheTest, ReplaceKeyTest) {
// the existing entry and gets promoted properly
TEST(LRUCacheTest, EraseByKey) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
// i <= maxSize so we erase a non-existent element
for (int i = 0; i <= maxSize; i++) {
@@ -416,7 +412,6 @@ TEST(LRUCacheTest, EraseByKey) {
// Test removal of elements by iterator from the cache
TEST(LRUCacheTest, EraseByIterator) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
diff --git a/src/mongo/util/map_util.h b/src/mongo/util/map_util.h
index b576eb573af..5825cfe79b8 100644
--- a/src/mongo/util/map_util.h
+++ b/src/mongo/util/map_util.h
@@ -42,4 +42,4 @@ V mapFindWithDefault(const M& myMap, const K& key, const V& defaultValue = V())
return it->second;
}
-} // end namespace
+} // namespace mongo
diff --git a/src/mongo/util/md5_test.cpp b/src/mongo/util/md5_test.cpp
index 996cf6ae931..79d598eb040 100644
--- a/src/mongo/util/md5_test.cpp
+++ b/src/mongo/util/md5_test.cpp
@@ -35,4 +35,4 @@ namespace mongo {
TEST(MD5, BuiltIn1) {
ASSERT_EQUALS(0, do_md5_test());
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/md5main.cpp b/src/mongo/util/md5main.cpp
index 51291c69686..27047713d55 100644
--- a/src/mongo/util/md5main.cpp
+++ b/src/mongo/util/md5main.cpp
@@ -64,8 +64,8 @@ static const char* const version = "2002-04-13";
/* modified: not static, renamed */
/* Run the self-test. */
/*static*/ int
- // do_test(void)
- do_md5_test(void) {
+// do_test(void)
+do_md5_test(void) {
static const char* const test[7 * 2] = {
"",
"d41d8cd98f00b204e9800998ecf8427e",
diff --git a/src/mongo/util/net/cidr.cpp b/src/mongo/util/net/cidr.cpp
index 8e3f5899c42..20cc7e6069c 100644
--- a/src/mongo/util/net/cidr.cpp
+++ b/src/mongo/util/net/cidr.cpp
@@ -40,8 +40,8 @@
#endif
using std::begin;
-using std::find;
using std::end;
+using std::find;
namespace mongo {
@@ -139,7 +139,7 @@ BSONObjBuilder& BSONObjBuilderValueStream::operator<<<CIDR>(CIDR value) {
return *_builder;
}
-} // namespace
+} // namespace mongo
std::ostream& mongo::operator<<(std::ostream& s, const CIDR& cidr) {
return append(s, cidr._family, cidr._ip, cidr._len);
diff --git a/src/mongo/util/net/hostandport.cpp b/src/mongo/util/net/hostandport.cpp
index ed74befa3a7..eb22852a926 100644
--- a/src/mongo/util/net/hostandport.cpp
+++ b/src/mongo/util/net/hostandport.cpp
@@ -80,7 +80,7 @@ int HostAndPort::port() const {
bool HostAndPort::isLocalHost() const {
return (_host == "localhost" || str::startsWith(_host.c_str(), "127.") || _host == "::1" ||
_host == "anonymous unix socket" || _host.c_str()[0] == '/' // unix socket
- );
+ );
}
bool HostAndPort::isDefaultRoute() const {
@@ -135,8 +135,8 @@ Status HostAndPort::initialize(StringData s) {
if (openBracketPos != std::string::npos) {
if (openBracketPos != 0) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "'[' present, but not first character in "
- << s.toString());
+ str::stream()
+ << "'[' present, but not first character in " << s.toString());
}
if (closeBracketPos == std::string::npos) {
return Status(ErrorCodes::FailedToParse,
@@ -150,31 +150,29 @@ Status HostAndPort::initialize(StringData s) {
// If the last colon is inside the brackets, then there must not be a port.
if (s.size() != closeBracketPos + 1) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "missing colon after ']' before the port in "
- << s.toString());
+ str::stream()
+ << "missing colon after ']' before the port in " << s.toString());
}
colonPos = std::string::npos;
} else if (colonPos != closeBracketPos + 1) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Extraneous characters between ']' and pre-port ':'"
- << " in "
- << s.toString());
+ << " in " << s.toString());
}
} else if (closeBracketPos != std::string::npos) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "']' present without '[' in " << s.toString());
} else if (s.find(':') != colonPos) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "More than one ':' detected. If this is an ipv6 address,"
- << " it needs to be surrounded by '[' and ']'; "
- << s.toString());
+ str::stream()
+ << "More than one ':' detected. If this is an ipv6 address,"
+ << " it needs to be surrounded by '[' and ']'; " << s.toString());
}
if (hostPart.empty()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Empty host component parsing HostAndPort from \""
- << str::escape(s.toString())
- << "\"");
+ << str::escape(s.toString()) << "\"");
}
int port;
@@ -188,8 +186,7 @@ Status HostAndPort::initialize(StringData s) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Port number " << port
<< " out of range parsing HostAndPort from \""
- << str::escape(s.toString())
- << "\"");
+ << str::escape(s.toString()) << "\"");
}
} else {
port = -1;
diff --git a/src/mongo/util/net/http_client_none.cpp b/src/mongo/util/net/http_client_none.cpp
index 3e0789c116d..6a3d11e6b1a 100644
--- a/src/mongo/util/net/http_client_none.cpp
+++ b/src/mongo/util/net/http_client_none.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/util/net/http_client.h"
#include "mongo/base/status.h"
+#include "mongo/util/net/http_client.h"
namespace mongo {
diff --git a/src/mongo/util/net/http_client_winhttp.cpp b/src/mongo/util/net/http_client_winhttp.cpp
index 4ddab8046aa..f774e7387e4 100644
--- a/src/mongo/util/net/http_client_winhttp.cpp
+++ b/src/mongo/util/net/http_client_winhttp.cpp
@@ -60,7 +60,8 @@ namespace mongo {
namespace {
const LPCWSTR kAcceptTypes[] = {
- L"application/octet-stream", nullptr,
+ L"application/octet-stream",
+ nullptr,
};
struct ProcessedUrl {
@@ -253,8 +254,7 @@ private:
const auto msg = errnoWithDescription(err);
uasserted(ErrorCodes::OperationFailed,
str::stream() << "Failed receiving response from server"
- << ": "
- << msg);
+ << ": " << msg);
}
DWORD statusCode = 0;
diff --git a/src/mongo/util/net/private/socket_poll.cpp b/src/mongo/util/net/private/socket_poll.cpp
index 7726aa3c077..b032d8ac732 100644
--- a/src/mongo/util/net/private/socket_poll.cpp
+++ b/src/mongo/util/net/private/socket_poll.cpp
@@ -70,4 +70,4 @@ int socketPoll(pollfd* fdarray, unsigned long nfds, int timeout) {
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/net/private/socket_poll.h b/src/mongo/util/net/private/socket_poll.h
index 705633f624b..8b0c116b66e 100644
--- a/src/mongo/util/net/private/socket_poll.h
+++ b/src/mongo/util/net/private/socket_poll.h
@@ -36,4 +36,4 @@
namespace mongo {
bool isPollSupported();
int socketPoll(pollfd* fdarray, unsigned long nfds, int timeout);
-}
+} // namespace mongo
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index bb616b4452e..b0d04d1a8c2 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -120,8 +120,8 @@ void setSockTimeouts(int sock, double secs) {
log() << "unable to set SO_RCVTIMEO: " << errnoWithDescription(WSAGetLastError());
status =
setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, reinterpret_cast<char*>(&timeout), sizeof(DWORD));
- DEV if (report && (status == SOCKET_ERROR)) log() << "unable to set SO_SNDTIMEO: "
- << errnoWithDescription(WSAGetLastError());
+ DEV if (report && (status == SOCKET_ERROR)) log()
+ << "unable to set SO_SNDTIMEO: " << errnoWithDescription(WSAGetLastError());
#else
struct timeval tv;
tv.tv_sec = (int)secs;
@@ -547,7 +547,7 @@ void Socket::handleSendError(int ret, const char* context) {
<< ' ' << remoteString();
throwSocketError(SocketErrorKind::SEND_ERROR, remoteString());
}
-}
+} // namespace mongo
void Socket::handleRecvError(int ret, int len) {
if (ret == 0) {
diff --git a/src/mongo/util/net/ssl/context_schannel.hpp b/src/mongo/util/net/ssl/context_schannel.hpp
index baabea394f6..fff06f9b188 100644
--- a/src/mongo/util/net/ssl/context_schannel.hpp
+++ b/src/mongo/util/net/ssl/context_schannel.hpp
@@ -53,28 +53,28 @@ public:
#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Move-construct a context from another.
/**
- * This constructor moves an SSL context from one object to another.
- *
- * @param other The other context object from which the move will occur.
- *
- * @note Following the move, the following operations only are valid for the
- * moved-from object:
- * @li Destruction.
- * @li As a target for move-assignment.
- */
+ * This constructor moves an SSL context from one object to another.
+ *
+ * @param other The other context object from which the move will occur.
+ *
+ * @note Following the move, the following operations only are valid for the
+ * moved-from object:
+ * @li Destruction.
+ * @li As a target for move-assignment.
+ */
ASIO_DECL context(context&& other);
/// Move-assign a context from another.
/**
- * This assignment operator moves an SSL context from one object to another.
- *
- * @param other The other context object from which the move will occur.
- *
- * @note Following the move, the following operations only are valid for the
- * moved-from object:
- * @li Destruction.
- * @li As a target for move-assignment.
- */
+ * This assignment operator moves an SSL context from one object to another.
+ *
+ * @param other The other context object from which the move will occur.
+ *
+ * @note Following the move, the following operations only are valid for the
+ * moved-from object:
+ * @li Destruction.
+ * @li As a target for move-assignment.
+ */
ASIO_DECL context& operator=(context&& other);
#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
@@ -83,10 +83,10 @@ public:
/// Get the underlying implementation in the native type.
/**
- * This function may be used to obtain the underlying implementation of the
- * context. This is intended to allow access to context functionality that is
- * not otherwise provided.
- */
+ * This function may be used to obtain the underlying implementation of the
+ * context. This is intended to allow access to context functionality that is
+ * not otherwise provided.
+ */
ASIO_DECL native_handle_type native_handle();
private:
diff --git a/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp b/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
index a78460e8d97..154f08707aa 100644
--- a/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
+++ b/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
@@ -63,16 +63,16 @@ public:
const auto status = static_cast<::OSStatus>(value);
apple::CFUniquePtr<::CFStringRef> errstr(::SecCopyErrorMessageString(status, nullptr));
if (!errstr) {
- return mongo::str::stream() << "Secure.Transport unknown error: "
- << static_cast<int>(status);
+ return mongo::str::stream()
+ << "Secure.Transport unknown error: " << static_cast<int>(status);
}
const auto len = ::CFStringGetMaximumSizeForEncoding(::CFStringGetLength(errstr.get()),
::kCFStringEncodingUTF8);
std::string ret;
ret.resize(len + 1);
if (!::CFStringGetCString(errstr.get(), &ret[0], len, ::kCFStringEncodingUTF8)) {
- return mongo::str::stream() << "Secure.Transport unknown error: "
- << static_cast<int>(status);
+ return mongo::str::stream()
+ << "Secure.Transport unknown error: " << static_cast<int>(status);
}
ret.resize(strlen(ret.c_str()));
diff --git a/src/mongo/util/net/ssl/detail/io.hpp b/src/mongo/util/net/ssl/detail/io.hpp
index 8a702abc9dd..d6e376b00f0 100644
--- a/src/mongo/util/net/ssl/detail/io.hpp
+++ b/src/mongo/util/net/ssl/detail/io.hpp
@@ -247,7 +247,7 @@ public:
// Release any waiting write operations.
core_.pending_write_.expires_at(core_.neg_infin());
- // Fall through to call handler.
+ // Fall through to call handler.
default:
diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
index 441fcb3e763..fae6729ce75 100644
--- a/src/mongo/util/net/ssl_manager.cpp
+++ b/src/mongo/util/net/ssl_manager.cpp
@@ -171,9 +171,7 @@ std::string RFC4514Parser::extractAttributeName() {
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "DN attribute names must begin with either a digit or an alpha"
- << " not \'"
- << ch
- << "\'");
+ << " not \'" << ch << "\'");
}
for (; ch != '=' && !done(); ch = _advance()) {
@@ -218,8 +216,7 @@ std::pair<std::string, RFC4514Parser::ValueTerminator> RFC4514Parser::extractVal
uassert(ErrorCodes::BadValue,
str::stream() << "Escaped hex value contains invalid character \'"
- << hexValStr[1]
- << "\'",
+ << hexValStr[1] << "\'",
isHex(hexValStr[1]));
const char hexVal = uassertStatusOK(fromHex(StringData(hexValStr.data(), 2)));
sb << hexVal;
@@ -247,8 +244,8 @@ std::pair<std::string, RFC4514Parser::ValueTerminator> RFC4514Parser::extractVal
}
} else if (isEscaped(ch)) {
uasserted(ErrorCodes::BadValue,
- str::stream() << "Found unescaped character that should be escaped: \'" << ch
- << "\'");
+ str::stream()
+ << "Found unescaped character that should be escaped: \'" << ch << "\'");
} else {
if (ch != ' ') {
trailingSpaces = 0;
@@ -832,9 +829,9 @@ StatusWith<std::string> readDERString(ConstDataRangeCursor& cdc) {
if (derString.getType() != DERType::UTF8String) {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Unexpected DER Tag, Got "
- << static_cast<char>(derString.getType())
- << ", Expected UTF8String");
+ str::stream()
+ << "Unexpected DER Tag, Got " << static_cast<char>(derString.getType())
+ << ", Expected UTF8String");
}
return derString.readUtf8String();
@@ -970,9 +967,9 @@ StatusWith<stdx::unordered_set<RoleName>> parsePeerRoles(ConstDataRange cdrExten
if (swSet.getValue().getType() != DERType::SET) {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Unexpected DER Tag, Got "
- << static_cast<char>(swSet.getValue().getType())
- << ", Expected SET");
+ str::stream()
+ << "Unexpected DER Tag, Got "
+ << static_cast<char>(swSet.getValue().getType()) << ", Expected SET");
}
ConstDataRangeCursor cdcSet(swSet.getValue().getSetRange());
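
All four hunks in this file are the same mechanical change: clang-format now breaks before the str::stream() temporary rather than after each <<. For orientation, the pattern itself, reusing only names already visible in this diff (illustrative, not a new API):

    Status makeTagError(char got) {
        return Status(ErrorCodes::InvalidSSLConfiguration,
                      str::stream() << "Unexpected DER Tag, Got " << got
                                    << ", Expected UTF8String");
    }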
diff --git a/src/mongo/util/net/ssl_manager.h b/src/mongo/util/net/ssl_manager.h
index 859f671d24b..ce7b7d9bfc7 100644
--- a/src/mongo/util/net/ssl_manager.h
+++ b/src/mongo/util/net/ssl_manager.h
@@ -69,7 +69,7 @@ Status validateOpensslCipherConfig(const std::string&);
* Validation callback for setParameter 'disableNonTLSConnectionLogging'.
*/
Status validateDisableNonTLSConnectionLogging(const bool&);
-}
+} // namespace mongo
#ifdef MONGO_CONFIG_SSL
namespace mongo {
@@ -220,8 +220,8 @@ public:
virtual const SSLConfiguration& getSSLConfiguration() const = 0;
/**
- * Fetches the error text for an error code, in a thread-safe manner.
- */
+ * Fetches the error text for an error code, in a thread-safe manner.
+ */
static std::string getSSLErrorMessage(int code);
/**
diff --git a/src/mongo/util/net/ssl_manager_apple.cpp b/src/mongo/util/net/ssl_manager_apple.cpp
index c6b828125e8..205837d1035 100644
--- a/src/mongo/util/net/ssl_manager_apple.cpp
+++ b/src/mongo/util/net/ssl_manager_apple.cpp
@@ -725,8 +725,7 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "Unable to load PEM from '" << keyfilepath << "'"
<< (passphrase.empty() ? "" : " with passphrase")
- << (msg.empty() ? "" : ": ")
- << msg);
+ << (msg.empty() ? "" : ": ") << msg);
};
std::ifstream pemFile(keyfilepath, std::ios::binary);
@@ -746,7 +745,9 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
nullptr, reinterpret_cast<const uint8_t*>(passphrase.c_str()), passphrase.size()));
}
::SecItemImportExportKeyParameters params = {
- SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION, 0, cfpass.get(),
+ SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION,
+ 0,
+ cfpass.get(),
};
CFUniquePtr<CFStringRef> cfkeyfile(
@@ -771,8 +772,8 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
"key. Consider using a certificate selector or PKCS#12 instead");
}
if (status != ::errSecSuccess) {
- return retFail(str::stream() << "Failing importing certificate(s): "
- << stringFromOSStatus(status));
+ return retFail(str::stream()
+ << "Failing importing certificate(s): " << stringFromOSStatus(status));
}
if (mode == kLoadPEMBindIdentities) {
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index 563c6a570ad..787b5d1d274 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -925,8 +925,9 @@ Status SSLManagerOpenSSL::initSSLContext(SSL_CTX* context,
}
// We use the address of the context as the session id context.
- if (0 == ::SSL_CTX_set_session_id_context(
- context, reinterpret_cast<unsigned char*>(&context), sizeof(context))) {
+ if (0 ==
+ ::SSL_CTX_set_session_id_context(
+ context, reinterpret_cast<unsigned char*>(&context), sizeof(context))) {
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "Can not store ssl session id context: "
<< getSSLErrorMessage(ERR_get_error()));
@@ -1312,14 +1313,11 @@ Status SSLManagerOpenSSL::_setupSystemCA(SSL_CTX* context) {
// On non-Windows/non-Apple platforms, the OpenSSL libraries should have been configured
// with default locations for CA certificates.
if (SSL_CTX_set_default_verify_paths(context) != 1) {
- return {ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "error loading system CA certificates "
- << "(default certificate file: "
- << X509_get_default_cert_file()
- << ", "
- << "default certificate path: "
- << X509_get_default_cert_dir()
- << ")"};
+ return {
+ ErrorCodes::InvalidSSLConfiguration,
+ str::stream() << "error loading system CA certificates "
+ << "(default certificate file: " << X509_get_default_cert_file() << ", "
+ << "default certificate path: " << X509_get_default_cert_dir() << ")"};
}
#else
@@ -1363,17 +1361,17 @@ bool SSLManagerOpenSSL::_setupCRL(SSL_CTX* context, const std::string& crlFile)
}
/*
-* The interface layer between network and BIO-pair. The BIO-pair buffers
-* the data to/from the TLS layer.
-*/
+ * The interface layer between network and BIO-pair. The BIO-pair buffers
+ * the data to/from the TLS layer.
+ */
void SSLManagerOpenSSL::_flushNetworkBIO(SSLConnectionOpenSSL* conn) {
char buffer[BUFFER_SIZE];
int wantWrite;
/*
- * Write the complete contents of the buffer. Leaving the buffer
- * unflushed could cause a deadlock.
- */
+ * Write the complete contents of the buffer. Leaving the buffer
+ * unflushed could cause a deadlock.
+ */
while ((wantWrite = BIO_ctrl_pending(conn->networkBIO)) > 0) {
if (wantWrite > BUFFER_SIZE) {
wantWrite = BUFFER_SIZE;
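
The _flushNetworkBIO hunk above re-indents a comment on a drain loop over an OpenSSL BIO pair. A hedged sketch of that loop with plain OpenSSL calls; sendToPeer is a placeholder for the transport write, and real code must also handle partial and failed writes:

    #include <openssl/bio.h>
    #include <algorithm>
    #include <cstddef>

    void flushNetworkBIO(BIO* networkBIO, void (*sendToPeer)(const char*, int)) {
        char buffer[4096];
        size_t pending;
        // Leaving bytes buffered in the BIO pair can deadlock the TLS
        // handshake, so keep draining until nothing is pending.
        while ((pending = ::BIO_ctrl_pending(networkBIO)) > 0) {
            const int want = static_cast<int>(std::min(pending, sizeof(buffer)));
            const int got = ::BIO_read(networkBIO, buffer, want);
            if (got <= 0)
                break;
            sendToPeer(buffer, got);
        }
    }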
diff --git a/src/mongo/util/net/ssl_manager_test.cpp b/src/mongo/util/net/ssl_manager_test.cpp
index acf4d04e438..a7335970125 100644
--- a/src/mongo/util/net/ssl_manager_test.cpp
+++ b/src/mongo/util/net/ssl_manager_test.cpp
@@ -187,7 +187,10 @@ TEST(SSLManager, MongoDBRolesParser) {
// Negative: Runt, only a tag and long length with wrong missing length
{
unsigned char derData[] = {
- 0x31, 0x88, 0xff, 0xff,
+ 0x31,
+ 0x88,
+ 0xff,
+ 0xff,
};
auto swPeer = parsePeerRoles(ConstDataRange(derData));
ASSERT_NOT_OK(swPeer.getStatus());
@@ -196,7 +199,10 @@ TEST(SSLManager, MongoDBRolesParser) {
// Negative: Runt, only a tag and long length
{
unsigned char derData[] = {
- 0x31, 0x82, 0xff, 0xff,
+ 0x31,
+ 0x82,
+ 0xff,
+ 0xff,
};
auto swPeer = parsePeerRoles(ConstDataRange(derData));
ASSERT_NOT_OK(swPeer.getStatus());
@@ -362,9 +368,7 @@ TEST(SSLManager, DNParsingAndNormalization) {
}
TEST(SSLManager, BadDNParsing) {
- std::vector<std::string> tests = {"CN=#12345",
- R"(CN=\B)",
- R"(CN=<", "\)"};
+ std::vector<std::string> tests = {"CN=#12345", R"(CN=\B)", R"(CN=<", "\)"};
for (const auto& test : tests) {
log() << "Testing bad DN: \"" << test << "\"";
auto swDN = parseDN(test);
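
The "runt" tests above feed DER input that stops right after the length prefix: 0x31 is the SET tag, and a length byte with the high bit set (0x82, 0x88) promises that the next 2 or 8 bytes hold the real length, which the truncated input then fails to honor. For contrast, a minimal well-formed input uses the short form:

    unsigned char emptySet[] = {
        0x31,  // DER SET tag
        0x00,  // short-form length: zero content bytes follow
    };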
diff --git a/src/mongo/util/net/ssl_manager_windows.cpp b/src/mongo/util/net/ssl_manager_windows.cpp
index 514d39094f6..d73f2f7d8f4 100644
--- a/src/mongo/util/net/ssl_manager_windows.cpp
+++ b/src/mongo/util/net/ssl_manager_windows.cpp
@@ -70,8 +70,8 @@ extern SSLManagerInterface* theSSLManager;
namespace {
/**
-* Free a Certificate Context.
-*/
+ * Free a Certificate Context.
+ */
struct CERTFree {
void operator()(const CERT_CONTEXT* p) noexcept {
if (p) {
@@ -83,8 +83,8 @@ struct CERTFree {
using UniqueCertificate = std::unique_ptr<const CERT_CONTEXT, CERTFree>;
/**
-* Free a CRL Handle
-*/
+ * Free a CRL Handle
+ */
struct CryptCRLFree {
void operator()(const CRL_CONTEXT* p) noexcept {
if (p) {
@@ -97,8 +97,8 @@ using UniqueCRL = std::unique_ptr<const CRL_CONTEXT, CryptCRLFree>;
/**
-* Free a Certificate Chain Context
-*/
+ * Free a Certificate Chain Context
+ */
struct CryptCertChainFree {
void operator()(const CERT_CHAIN_CONTEXT* p) noexcept {
if (p) {
@@ -111,10 +111,10 @@ using UniqueCertChain = std::unique_ptr<const CERT_CHAIN_CONTEXT, CryptCertChain
/**
-* A simple generic class to manage Windows handle like things. Behaves similiar to std::unique_ptr.
-*
-* Only supports move.
-*/
+ * A simple generic class to manage Windows handle-like things. Behaves similarly to std::unique_ptr.
+ *
+ * Only supports move.
+ */
template <typename HandleT, class Deleter>
class AutoHandle {
public:
@@ -157,8 +157,8 @@ private:
};
/**
-* Free a HCRYPTPROV Handle
-*/
+ * Free a HCRYPTPROV Handle
+ */
struct CryptProviderFree {
void operator()(HCRYPTPROV const h) noexcept {
if (h) {
@@ -170,8 +170,8 @@ struct CryptProviderFree {
using UniqueCryptProvider = AutoHandle<HCRYPTPROV, CryptProviderFree>;
/**
-* Free a HCRYPTKEY Handle
-*/
+ * Free a HCRYPTKEY Handle
+ */
struct CryptKeyFree {
void operator()(HCRYPTKEY const h) noexcept {
if (h) {
@@ -184,7 +184,7 @@ using UniqueCryptKey = AutoHandle<HCRYPTKEY, CryptKeyFree>;
/**
* Free a CERTSTORE Handle
-*/
+ */
struct CertStoreFree {
void operator()(HCERTSTORE const p) noexcept {
if (p) {
@@ -199,8 +199,8 @@ struct CertStoreFree {
using UniqueCertStore = AutoHandle<HCERTSTORE, CertStoreFree>;
/**
-* Free a HCERTCHAINENGINE Handle
-*/
+ * Free a HCERTCHAINENGINE Handle
+ */
struct CertChainEngineFree {
void operator()(HCERTCHAINENGINE const p) noexcept {
if (p) {
@@ -816,8 +816,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
// Use the log file if possible
if (!serverGlobalParams.logpath.empty()) {
static AtomicWord<int> counter{0};
- std::string keyContainerName = str::stream() << serverGlobalParams.logpath
- << counter.fetchAndAdd(1);
+ std::string keyContainerName = str::stream()
+ << serverGlobalParams.logpath << counter.fetchAndAdd(1);
wstr = toNativeString(keyContainerName.c_str());
} else {
auto us = UUID::gen().toString();
@@ -846,8 +846,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
} else {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CryptAcquireContextW failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CryptAcquireContextW failed " << errnoWithDescription(gle));
}
}
} else {
@@ -857,8 +857,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CryptAcquireContextW failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CryptAcquireContextW failed " << errnoWithDescription(gle));
}
}
UniqueCryptProvider cryptProvider(hProv);
@@ -1013,8 +1013,8 @@ Status readCRLPEMFile(HCERTSTORE certStore, StringData fileName) {
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CertAddCRLContextToStore Failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CertAddCRLContextToStore Failed " << errnoWithDescription(gle));
}
}
@@ -1061,8 +1061,7 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "CertOpenStore failed to open store 'My' from '" << storeName
- << "': "
- << errnoWithDescription(gle));
+ << "': " << errnoWithDescription(gle));
}
UniqueCertStore storeHolder(store);
@@ -1082,11 +1081,8 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
ErrorCodes::InvalidSSLConfiguration,
str::stream()
<< "CertFindCertificateInStore failed to find cert with subject name '"
- << selector.subject.c_str()
- << "' in 'My' store in '"
- << storeName
- << "': "
- << errnoWithDescription(gle));
+ << selector.subject.c_str() << "' in 'My' store in '" << storeName
+ << "': " << errnoWithDescription(gle));
}
return UniqueCertificate(cert);
@@ -1106,10 +1102,8 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
str::stream()
<< "CertFindCertificateInStore failed to find cert with thumbprint '"
<< toHex(selector.thumbprint.data(), selector.thumbprint.size())
- << "' in 'My' store in '"
- << storeName
- << "': "
- << errnoWithDescription(gle));
+ << "' in 'My' store in '" << storeName
+ << "': " << errnoWithDescription(gle));
}
return UniqueCertificate(cert);
@@ -1636,8 +1630,8 @@ Status validatePeerCertificate(const std::string& remoteHost,
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CertGetCertificateChain failed: "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CertGetCertificateChain failed: " << errnoWithDescription(gle));
}
UniqueCertChain certChainHolder(chainContext);
@@ -1761,8 +1755,8 @@ StatusWith<TLSVersion> mapTLSVersion(PCtxtHandle ssl) {
if (ss != SEC_E_OK) {
return Status(ErrorCodes::SSLHandshakeFailed,
- str::stream() << "QueryContextAttributes for connection info failed with"
- << ss);
+ str::stream()
+ << "QueryContextAttributes for connection info failed with" << ss);
}
switch (connInfo.dwProtocol) {
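
Several hunks above touch AutoHandle, the move-only wrapper for handle types that std::unique_ptr cannot hold directly (HCRYPTPROV and friends are integers, not pointers). A minimal sketch of the pattern with a stateless deleter functor, as in the CryptXxxFree structs; this is illustrative, not the exact class:

    template <typename HandleT, class Deleter>
    class AutoHandleSketch {
    public:
        AutoHandleSketch() : _handle(HandleT{}) {}
        explicit AutoHandleSketch(HandleT h) : _handle(h) {}

        AutoHandleSketch(const AutoHandleSketch&) = delete;
        AutoHandleSketch& operator=(const AutoHandleSketch&) = delete;

        AutoHandleSketch(AutoHandleSketch&& other) noexcept : _handle(other._handle) {
            other._handle = HandleT{};
        }

        ~AutoHandleSketch() {
            Deleter()(_handle);  // the deleters above all tolerate null handles
        }

        operator HandleT() const {
            return _handle;  // implicit conversion for use with C-style APIs
        }

    private:
        HandleT _handle;
    };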
diff --git a/src/mongo/util/net/ssl_options.cpp b/src/mongo/util/net/ssl_options.cpp
index bc87567cafe..30389860a29 100644
--- a/src/mongo/util/net/ssl_options.cpp
+++ b/src/mongo/util/net/ssl_options.cpp
@@ -145,15 +145,14 @@ Status parseCertificateSelector(SSLParams::CertificateSelector* selector,
if (key != "thumbprint") {
return {ErrorCodes::BadValue,
str::stream() << "Unknown certificate selector property for '" << name << "': '"
- << key
- << "'"};
+ << key << "'"};
}
auto swHex = hexToVector(value.substr(delim + 1));
if (!swHex.isOK()) {
return {ErrorCodes::BadValue,
- str::stream() << "Invalid certificate selector value for '" << name << "': "
- << swHex.getStatus().reason()};
+ str::stream() << "Invalid certificate selector value for '" << name
+ << "': " << swHex.getStatus().reason()};
}
selector->thumbprint = std::move(swHex.getValue());
@@ -174,8 +173,7 @@ StatusWith<SSLParams::SSLModes> SSLParams::sslModeParse(StringData strMode) {
return Status(
ErrorCodes::BadValue,
str::stream()
- << "Invalid sslMode setting '"
- << strMode
+ << "Invalid sslMode setting '" << strMode
<< "', expected one of: 'disabled', 'allowSSL', 'preferSSL', or 'requireSSL'");
}
}
@@ -193,8 +191,7 @@ StatusWith<SSLParams::SSLModes> SSLParams::tlsModeParse(StringData strMode) {
return Status(
ErrorCodes::BadValue,
str::stream()
- << "Invalid tlsMode setting '"
- << strMode
+ << "Invalid tlsMode setting '" << strMode
<< "', expected one of: 'disabled', 'allowTLS', 'preferTLS', or 'requireTLS'");
}
}
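
The parseCertificateSelector hunks only re-wrap its error messages; the function's job is to split a <property>=<value> selector and dispatch on the property name. A hedged stub of just the split step (the real code also validates the property name and hex-decodes thumbprints):

    #include <string>
    #include <utility>

    std::pair<std::string, std::string> splitSelector(const std::string& value) {
        const auto delim = value.find('=');
        if (delim == std::string::npos)
            return {value, ""};  // the caller treats a missing '=' as an error
        return {value.substr(0, delim), value.substr(delim + 1)};
    }

    // splitSelector("thumbprint=0123ab") -> {"thumbprint", "0123ab"}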
diff --git a/src/mongo/util/net/ssl_options.h b/src/mongo/util/net/ssl_options.h
index 27402e8a923..cb820ba91e7 100644
--- a/src/mongo/util/net/ssl_options.h
+++ b/src/mongo/util/net/ssl_options.h
@@ -93,23 +93,23 @@ struct SSLParams {
enum SSLModes : int {
/**
- * Make unencrypted outgoing connections and do not accept incoming SSL-connections.
- */
+ * Make unencrypted outgoing connections and do not accept incoming SSL-connections.
+ */
SSLMode_disabled,
/**
- * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections.
- */
+ * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections.
+ */
SSLMode_allowSSL,
/**
- * Make outgoing SSL-connections and accept both unecrypted and SSL-connections.
- */
+ * Make outgoing SSL-connections and accept both unencrypted and SSL-connections.
+ */
SSLMode_preferSSL,
/**
- * Make outgoing SSL-connections and only accept incoming SSL-connections.
- */
+ * Make outgoing SSL-connections and only accept incoming SSL-connections.
+ */
SSLMode_requireSSL
};
@@ -137,10 +137,10 @@ Status storeSSLDisabledProtocols(
SSLDisabledProtocolsMode mode = SSLDisabledProtocolsMode::kStandardFormat);
/**
-* The global SSL configuration. This should be accessed only after global initialization has
-* completed. If it must be accessed in an initializer, the initializer should have
-* "EndStartupOptionStorage" as a prerequisite.
-*/
+ * The global SSL configuration. This should be accessed only after global initialization has
+ * completed. If it must be accessed in an initializer, the initializer should have
+ * "EndStartupOptionStorage" as a prerequisite.
+ */
const SSLParams& getSSLGlobalParams();
Status parseCertificateSelector(SSLParams::CertificateSelector* selector,
diff --git a/src/mongo/util/net/ssl_parameters.cpp b/src/mongo/util/net/ssl_parameters.cpp
index 0ace15fb3a2..fd1f8d23c58 100644
--- a/src/mongo/util/net/ssl_parameters.cpp
+++ b/src/mongo/util/net/ssl_parameters.cpp
@@ -70,9 +70,9 @@ StatusWith<ServerGlobalParams::ClusterAuthModes> clusterAuthModeParse(StringData
} else if (strMode == "x509") {
return ServerGlobalParams::ClusterAuthMode_x509;
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid clusterAuthMode '" << strMode
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid clusterAuthMode '" << strMode
<< "', expected one of: 'keyFile', 'sendKeyFile', 'sendX509', or 'x509'");
}
}
@@ -97,8 +97,7 @@ StatusWith<SSLParams::SSLModes> checkTLSModeTransition(T modeToString,
return {ErrorCodes::BadValue,
str::stream() << "Illegal state transition for " << parameterName
<< ", attempt to change from "
- << modeToString(static_cast<SSLParams::SSLModes>(oldMode))
- << " to "
+ << modeToString(static_cast<SSLParams::SSLModes>(oldMode)) << " to "
<< strMode};
}
}
diff --git a/src/mongo/util/net/ssl_parameters_auth.cpp b/src/mongo/util/net/ssl_parameters_auth.cpp
index fd821f4e52d..612c2bc70cc 100644
--- a/src/mongo/util/net/ssl_parameters_auth.cpp
+++ b/src/mongo/util/net/ssl_parameters_auth.cpp
@@ -67,9 +67,9 @@ StatusWith<ServerGlobalParams::ClusterAuthModes> clusterAuthModeParse(StringData
} else if (strMode == "x509") {
return ServerGlobalParams::ClusterAuthMode_x509;
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid clusterAuthMode '" << strMode
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid clusterAuthMode '" << strMode
<< "', expected one of: 'keyFile', 'sendKeyFile', 'sendX509', or 'x509'");
}
}
@@ -99,18 +99,16 @@ Status ClusterAuthModeServerParameter::setFromString(const std::string& strMode)
"connections"};
}
serverGlobalParams.clusterAuthMode.store(mode);
- auth::setInternalUserAuthParams(
- BSON(saslCommandMechanismFieldName << "MONGODB-X509" << saslCommandUserDBFieldName
- << "$external"));
+ auth::setInternalUserAuthParams(BSON(saslCommandMechanismFieldName
+ << "MONGODB-X509" << saslCommandUserDBFieldName
+ << "$external"));
} else if ((mode == ServerGlobalParams::ClusterAuthMode_x509) &&
(oldMode == ServerGlobalParams::ClusterAuthMode_sendX509)) {
serverGlobalParams.clusterAuthMode.store(mode);
} else {
return {ErrorCodes::BadValue,
str::stream() << "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeFormat()
- << " to "
- << strMode};
+ << clusterAuthModeFormat() << " to " << strMode};
}
return Status::OK();
diff --git a/src/mongo/util/net/ssl_stream.cpp b/src/mongo/util/net/ssl_stream.cpp
index 80e3503f7ae..1aeb82d2189 100644
--- a/src/mongo/util/net/ssl_stream.cpp
+++ b/src/mongo/util/net/ssl_stream.cpp
@@ -40,8 +40,8 @@ namespace asio {
namespace ssl {
namespace detail {
MONGO_FAIL_POINT_DEFINE(smallTLSReads);
-} // namespce detail
-} // namespce ssl
-} // namespce asio
+} // namespace detail
+} // namespace ssl
+} // namespace asio
#endif
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index c5133a9c99a..512200c2744 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -625,7 +625,7 @@ void startService() {
}
}
-} // namspace ntservice
+} // namespace ntservice
} // namespace mongo
#endif
diff --git a/src/mongo/util/options_parser/constraints.h b/src/mongo/util/options_parser/constraints.h
index 796f7de8721..6be67a8f31c 100644
--- a/src/mongo/util/options_parser/constraints.h
+++ b/src/mongo/util/options_parser/constraints.h
@@ -131,10 +131,9 @@ private:
T typedVal;
if (!val.get(&typedVal).isOK()) {
return {ErrorCodes::InternalError,
- str::stream() << "Error: value for key: " << _key << " was found as type: "
- << val.typeToString()
- << " but is required to be type: "
- << typeid(typedVal).name()};
+ str::stream() << "Error: value for key: " << _key
+ << " was found as type: " << val.typeToString()
+ << " but is required to be type: " << typeid(typedVal).name()};
}
return _valueCallback(typedVal);
diff --git a/src/mongo/util/options_parser/environment_test.cpp b/src/mongo/util/options_parser/environment_test.cpp
index 9f0737e2ad6..6fbd3d70048 100644
--- a/src/mongo/util/options_parser/environment_test.cpp
+++ b/src/mongo/util/options_parser/environment_test.cpp
@@ -92,8 +92,7 @@ TEST(ToBSONTests, DottedValues) {
ASSERT_OK(environment.set(moe::Key("val1.dotted2"), moe::Value(std::string("string"))));
mongo::BSONObj obj = BSON("val1" << BSON("dotted1" << 6 << "dotted2"
<< "string")
- << "val2"
- << true);
+ << "val2" << true);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
@@ -108,12 +107,10 @@ TEST(ToBSONTests, DeepDottedValues) {
ASSERT_OK(environment.set(moe::Key("val2"), moe::Value(6.0)));
mongo::BSONObj obj =
BSON("val1" << BSON("first1" << BSON("second1" << BSON("third1" << 6 << "third2" << true)
- << "second2"
- << BSON("third1" << false))
+ << "second2" << BSON("third1" << false))
<< "first2"
<< "string")
- << "val2"
- << 6.0);
+ << "val2" << 6.0);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
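
Both test hunks above only re-wrap nested BSON() macro chains, which stream alternating field names and values and nest by using an inner BSON() as a value. The object the first test builds, written as JSON for orientation:

    // { "val1": { "dotted1": 6, "dotted2": "string" }, "val2": true }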
diff --git a/src/mongo/util/options_parser/option_section.cpp b/src/mongo/util/options_parser/option_section.cpp
index e8f3c6f9927..d54922fb29c 100644
--- a/src/mongo/util/options_parser/option_section.cpp
+++ b/src/mongo/util/options_parser/option_section.cpp
@@ -157,8 +157,7 @@ OptionDescription& OptionSection::addOptionChaining(
// Should not be the same as dottedName.
uassert(ErrorCodes::InternalError,
str::stream() << "Attempted to register option with conflict between dottedName and "
- << "deprecatedDottedName: "
- << dottedName,
+ << "deprecatedDottedName: " << dottedName,
!std::count(deprecatedDottedNames.begin(), deprecatedDottedNames.end(), dottedName));
// Verify deprecated single names.
@@ -170,8 +169,7 @@ OptionDescription& OptionSection::addOptionChaining(
// Should not be the same as singleName.
uassert(ErrorCodes::InternalError,
str::stream() << "Attempted to register option with conflict between singleName and "
- << "deprecatedSingleName: "
- << singleName,
+ << "deprecatedSingleName: " << singleName,
!std::count(deprecatedSingleNames.begin(), deprecatedSingleNames.end(), singleName));
// Should not contain any already registered name.
diff --git a/src/mongo/util/options_parser/options_parser.cpp b/src/mongo/util/options_parser/options_parser.cpp
index 76fdc7fd42a..99231dd132c 100644
--- a/src/mongo/util/options_parser/options_parser.cpp
+++ b/src/mongo/util/options_parser/options_parser.cpp
@@ -416,9 +416,7 @@ public:
uassert(ErrorCodes::BadValue,
str::stream()
- << nodeName
- << " expansion block must contain only '"
- << getExpansionName()
+ << nodeName << " expansion block must contain only '" << getExpansionName()
<< "', and optionally 'type', 'trim', and/or 'digest'/'digest_key' fields",
node.size() == numVisitedFields);
@@ -472,8 +470,7 @@ public:
&computed);
uassert(ErrorCodes::BadValue,
str::stream() << "SHA256HMAC of config expansion " << computed.toString()
- << " does not match expected digest: "
- << _digest->toString(),
+ << " does not match expected digest: " << _digest->toString(),
computed == *_digest);
}
@@ -487,8 +484,7 @@ public:
if (!status.isOK()) {
uasserted(status.code(),
str::stream() << "Failed processing output of " << getExpansionName()
- << " block for config file: "
- << status.reason());
+ << " block for config file: " << status.reason());
}
return newNode;
@@ -719,8 +715,7 @@ Status YAMLNodeToValue(const YAML::Node& YAMLNode,
if (stringMap.count(elemKey) > 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "String Map Option: " << key
- << " has duplicate keys in YAML Config: "
- << elemKey);
+ << " has duplicate keys in YAML Config: " << elemKey);
}
stringMap[std::move(elemKey)] = elemVal.Scalar();
@@ -1028,10 +1023,10 @@ Status addYAMLNodesToEnvironment(const YAML::Node& root,
}
/**
-* For all options that we registered as composable, combine the values from source and dest
-* and set the result in dest. Note that this only works for options that are registered as
-* vectors of strings.
-*/
+ * For all options that we registered as composable, combine the values from source and dest
+ * and set the result in dest. Note that this only works for options that are registered as
+ * vectors of strings.
+ */
Status addCompositions(const OptionSection& options, const Environment& source, Environment* dest) {
std::vector<OptionDescription> options_vector;
Status ret = options.getAllOptions(&options_vector);
@@ -1126,9 +1121,9 @@ Status addCompositions(const OptionSection& options, const Environment& source,
}
/**
-* For all options that have constraints, add those constraints to our environment so that
-* they run when the environment gets validated.
-*/
+ * For all options that have constraints, add those constraints to our environment so that
+ * they run when the environment gets validated.
+ */
Status addConstraints(const OptionSection& options, Environment* dest) {
std::vector<std::shared_ptr<Constraint>> constraints_vector;
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 42530ab08b6..65b61b08ab5 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -5096,7 +5096,8 @@ TEST(YAMLConfigFile, canonicalize) {
moe::OptionsParser parser;
moe::Environment env;
std::vector<std::string> argv = {
- "binary", "--bind_ip_all",
+ "binary",
+ "--bind_ip_all",
};
std::map<std::string, std::string> env_map;
ASSERT_OK(parser.run(opts, argv, env_map, &env));
diff --git a/src/mongo/util/perfctr_collect.cpp b/src/mongo/util/perfctr_collect.cpp
index c70f50fae3f..4fbe98dd419 100644
--- a/src/mongo/util/perfctr_collect.cpp
+++ b/src/mongo/util/perfctr_collect.cpp
@@ -296,9 +296,7 @@ StatusWith<std::vector<PerfCounterCollector::CounterInfo>> PerfCounterCollector:
if (status != PDH_MORE_DATA) {
return {ErrorCodes::WindowsPdhError,
str::stream() << formatFunctionCallError("PdhExpandCounterPathW", status)
- << " for counter '"
- << path
- << "'"};
+ << " for counter '" << path << "'"};
}
auto buf = std::make_unique<wchar_t[]>(pathListLength);
diff --git a/src/mongo/util/perfctr_collect_test.cpp b/src/mongo/util/perfctr_collect_test.cpp
index 35380d43421..869f72f9e20 100644
--- a/src/mongo/util/perfctr_collect_test.cpp
+++ b/src/mongo/util/perfctr_collect_test.cpp
@@ -178,22 +178,22 @@ TEST(FTDCPerfCollector, TestBadCollectionInput) {
ASSERT_NOT_OK(collection.addCountersGroup("cpu", {"\\Processor(0)\\% Idle Time"}));
// Duplicate counter
- ASSERT_NOT_OK(collection.addCountersGroup(
- "cpu2",
- {
- "\\Processor(0)\\% Idle Time", "\\Processor(0)\\% Idle Time",
- }));
+ ASSERT_NOT_OK(collection.addCountersGroup("cpu2",
+ {
+ "\\Processor(0)\\% Idle Time",
+ "\\Processor(0)\\% Idle Time",
+ }));
// Duplicate group
ASSERT_NOT_OK(
collection.addCountersGroupedByInstanceName("cpu", {"\\Processor(0)\\% Idle Time"}));
// Duplicate counter
- ASSERT_NOT_OK(collection.addCountersGroupedByInstanceName(
- "cpu2",
- {
- "\\Processor(0)\\% Idle Time", "\\Processor(0)\\% Idle Time",
- }));
+ ASSERT_NOT_OK(collection.addCountersGroupedByInstanceName("cpu2",
+ {
+ "\\Processor(0)\\% Idle Time",
+ "\\Processor(0)\\% Idle Time",
+ }));
}
// Test negative collector input
diff --git a/src/mongo/util/periodic_runner.h b/src/mongo/util/periodic_runner.h
index 93a03498357..e9dcfa67489 100644
--- a/src/mongo/util/periodic_runner.h
+++ b/src/mongo/util/periodic_runner.h
@@ -143,7 +143,7 @@ public:
* Each wrapped PeriodicRunner::ControllableJob function on this object throws
* if the underlying job is gone (e.g. in shutdown).
*/
-class[[nodiscard]] PeriodicJobAnchor {
+class [[nodiscard]] PeriodicJobAnchor {
public:
using Job = PeriodicRunner::ControllableJob;
diff --git a/src/mongo/util/periodic_runner_factory.cpp b/src/mongo/util/periodic_runner_factory.cpp
index 66cddf81be6..34aa8c86458 100644
--- a/src/mongo/util/periodic_runner_factory.cpp
+++ b/src/mongo/util/periodic_runner_factory.cpp
@@ -40,4 +40,4 @@ std::unique_ptr<PeriodicRunner> makePeriodicRunner(ServiceContext* svc) {
return std::make_unique<PeriodicRunnerImpl>(svc, svc->getPreciseClockSource());
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/util/periodic_runner_impl.cpp b/src/mongo/util/periodic_runner_impl.cpp
index fc21a7184a4..98a517cf7d9 100644
--- a/src/mongo/util/periodic_runner_impl.cpp
+++ b/src/mongo/util/periodic_runner_impl.cpp
@@ -57,7 +57,7 @@ PeriodicRunnerImpl::PeriodicJobImpl::PeriodicJobImpl(PeriodicJob job,
: _job(std::move(job)), _clockSource(source), _serviceContext(svc) {}
void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
- auto[startPromise, startFuture] = makePromiseFuture<void>();
+ auto [startPromise, startFuture] = makePromiseFuture<void>();
{
stdx::lock_guard lk(_mutex);
@@ -65,7 +65,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
}
- _thread = stdx::thread([ this, startPromise = std::move(startPromise) ]() mutable {
+ _thread = stdx::thread([this, startPromise = std::move(startPromise)]() mutable {
auto guard = makeGuard([this] { _stopPromise.emplaceValue(); });
Client::initThread(_job.name, _serviceContext, nullptr);
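
The hunk above reformats a startup handshake: _run() makes a promise/future pair, the worker thread fulfils the promise once initialized, and the spawner waits on the future. A sketch with std::promise standing in for mongo's makePromiseFuture (assumed analogous, not the same type):

    #include <future>
    #include <thread>
    #include <utility>

    void runWorkerWithHandshake() {
        std::promise<void> startPromise;
        std::future<void> startFuture = startPromise.get_future();

        std::thread worker([p = std::move(startPromise)]() mutable {
            // ... per-thread initialization (Client::initThread above) ...
            p.set_value();  // unblock the spawning thread
            // ... periodic work loop would run here ...
        });

        startFuture.get();  // returns once the worker has signalled readiness
        worker.join();
    }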
diff --git a/src/mongo/util/polymorphic_scoped.h b/src/mongo/util/polymorphic_scoped.h
index cace603334b..c6df4086503 100644
--- a/src/mongo/util/polymorphic_scoped.h
+++ b/src/mongo/util/polymorphic_scoped.h
@@ -31,8 +31,8 @@
namespace mongo {
/**
-* Base class to implement interfaces with RAII-style objects
-*/
+ * Base class to implement interfaces with RAII-style objects
+ */
class PolymorphicScoped {
public:
virtual ~PolymorphicScoped() = default;
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index 58a2ad4c686..43cde512599 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -245,4 +245,4 @@ private:
};
bool writePidFile(const std::string& path);
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp
index 478851ec91c..d38acf0393e 100644
--- a/src/mongo/util/processinfo_linux.cpp
+++ b/src/mongo/util/processinfo_linux.cpp
@@ -132,7 +132,7 @@ public:
&_exit_signal, &_processor,
&_rtprio, &_sched
*/
- );
+ );
if (found == 0) {
std::cout << "system error: reading proc info" << std::endl;
}
@@ -248,8 +248,8 @@ public:
class LinuxSysHelper {
public:
/**
- * Read the first 1023 bytes from a file
- */
+ * Read the first 1023 bytes from a file
+ */
static std::string readLineFromFile(const char* fname) {
FILE* f;
char fstr[1024] = {0};
@@ -264,8 +264,8 @@ public:
}
/**
- * Get some details about the CPU
- */
+ * Get some details about the CPU
+ */
static void getCpuInfo(int& procCount, std::string& freq, std::string& features) {
FILE* f;
char fstr[1024] = {0};
@@ -290,8 +290,8 @@ public:
}
/**
- * Determine linux distro and version
- */
+ * Determine linux distro and version
+ */
static void getLinuxDistro(std::string& name, std::string& version) {
char buf[4096] = {0};
@@ -387,8 +387,8 @@ public:
}
/**
- * Get system memory total
- */
+ * Get system memory total
+ */
static unsigned long long getSystemMemorySize() {
std::string meminfo = readLineFromFile("/proc/meminfo");
size_t lineOff = 0;
@@ -413,11 +413,11 @@ public:
}
/**
- * Get memory limit for the process.
- * If memory is being limited by the applied control group and it's less
- * than the OS system memory (default cgroup limit is ulonglong max) let's
- * return the actual memory we'll have available to the process.
- */
+ * Get memory limit for the process.
+ * If memory is limited by the applied control group and that limit is less
+ * than the OS system memory (the default cgroup limit is ulonglong max),
+ * return the memory actually available to the process.
+ */
static unsigned long long getMemorySizeLimit() {
unsigned long long systemMemBytes = getSystemMemorySize();
unsigned long long cgroupMemBytes = 0;
@@ -508,8 +508,8 @@ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
}
/**
-* Save a BSON obj representing the host system's details
-*/
+ * Save a BSON obj representing the host system's details
+ */
void ProcessInfo::SystemInfo::collectSystemInfo() {
utsname unameData;
std::string distroName, distroVersion;
@@ -563,8 +563,8 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
}
/**
-* Determine if the process is running with (cc)NUMA
-*/
+ * Determine if the process is running with (cc)NUMA
+ */
bool ProcessInfo::checkNumaEnabled() {
bool hasMultipleNodes = false;
bool hasNumaMaps = false;
@@ -619,4 +619,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_openbsd.cpp b/src/mongo/util/processinfo_openbsd.cpp
index 234d2e9d366..34dade8a885 100644
--- a/src/mongo/util/processinfo_openbsd.cpp
+++ b/src/mongo/util/processinfo_openbsd.cpp
@@ -217,4 +217,4 @@ boost::optional<unsigned long> ProcessInfo::getNumCoresForProcess() {
return nprocs;
return boost::none;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_osx.cpp b/src/mongo/util/processinfo_osx.cpp
index 0ed7de3a1f9..45fc77f68ff 100644
--- a/src/mongo/util/processinfo_osx.cpp
+++ b/src/mongo/util/processinfo_osx.cpp
@@ -240,4 +240,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_solaris.cpp b/src/mongo/util/processinfo_solaris.cpp
index 9d7d66f9891..c12ce0e6da9 100644
--- a/src/mongo/util/processinfo_solaris.cpp
+++ b/src/mongo/util/processinfo_solaris.cpp
@@ -242,4 +242,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_test.cpp b/src/mongo/util/processinfo_test.cpp
index 47fb5e91b5c..158c1186268 100644
--- a/src/mongo/util/processinfo_test.cpp
+++ b/src/mongo/util/processinfo_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/processinfo.h"
-using mongo::ProcessInfo;
using boost::optional;
+using mongo::ProcessInfo;
namespace mongo_test {
TEST(ProcessInfo, SysInfoIsInitialized) {
@@ -65,4 +65,4 @@ TEST(ProcessInfo, GetNumAvailableCores) {
TEST(ProcessInfo, GetNumCoresReturnsNonZeroNumberOfProcessors) {
ASSERT_GREATER_THAN(ProcessInfo::getNumCores(), 0u);
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/util/processinfo_unknown.cpp b/src/mongo/util/processinfo_unknown.cpp
index 338c6efd857..05f84b7f22c 100644
--- a/src/mongo/util/processinfo_unknown.cpp
+++ b/src/mongo/util/processinfo_unknown.cpp
@@ -78,4 +78,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
boost::optional<unsigned long> ProcessInfo::getNumCoresForProcess() {
return boost::none;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index e545778f897..3e6e0b27aae 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -413,4 +413,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/procparser.cpp b/src/mongo/util/procparser.cpp
index 630e2888eee..78e1a244155 100644
--- a/src/mongo/util/procparser.cpp
+++ b/src/mongo/util/procparser.cpp
@@ -93,8 +93,8 @@ StatusWith<std::string> readFileAsString(StringData filename) {
if (fd == -1) {
int err = errno;
return Status(ErrorCodes::FileOpenFailed,
- str::stream() << "Failed to open file " << filename << " with error: "
- << errnoWithDescription(err));
+ str::stream() << "Failed to open file " << filename
+ << " with error: " << errnoWithDescription(err));
}
auto scopedGuard = makeGuard([fd] { close(fd); });
@@ -122,8 +122,8 @@ StatusWith<std::string> readFileAsString(StringData filename) {
}
return Status(ErrorCodes::FileStreamFailed,
- str::stream() << "Failed to read file " << filename << " with error: "
- << errnoWithDescription(err));
+ str::stream() << "Failed to read file " << filename
+ << " with error: " << errnoWithDescription(err));
}
break;
@@ -432,11 +432,10 @@ Status parseProcNetstat(const std::vector<StringData>& keys,
// Split the file by lines.
uint32_t lineNum = 0;
- for (string_split_iterator
- lineIt = string_split_iterator(
- data.begin(),
- data.end(),
- boost::token_finder([](char c) { return c == '\n'; }, boost::token_compress_on));
+ for (string_split_iterator lineIt = string_split_iterator(
+ data.begin(),
+ data.end(),
+ boost::token_finder([](char c) { return c == '\n'; }, boost::token_compress_on));
lineIt != string_split_iterator();
++lineIt, ++lineNum) {
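
The hunk above re-indents a boost token_finder split that walks the proc file line by line while counting lines. As a plain-standard-library analogue (std::getline instead of the boost split iterator actually used here):

    #include <cstdint>
    #include <sstream>
    #include <string>

    void forEachLine(const std::string& data) {
        std::istringstream in(data);
        std::string line;
        for (uint32_t lineNum = 0; std::getline(in, line); ++lineNum) {
            // ... parse the key/value pairs found on `line` ...
        }
    }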
diff --git a/src/mongo/util/procparser.h b/src/mongo/util/procparser.h
index 9bae32cba10..8fd39d0fd35 100644
--- a/src/mongo/util/procparser.h
+++ b/src/mongo/util/procparser.h
@@ -60,12 +60,12 @@ Status parseProcStat(const std::vector<StringData>& keys,
BSONObjBuilder* builder);
/**
-* Read from file, and write the specified list of keys into builder.
-*
-* See parseProcStat.
-*
-* Returns Status errors on file reading issues.
-*/
+ * Read from file, and write the specified list of keys into builder.
+ *
+ * See parseProcStat.
+ *
+ * Returns Status errors on file reading issues.
+ */
Status parseProcStatFile(StringData filename,
const std::vector<StringData>& keys,
BSONObjBuilder* builder);
diff --git a/src/mongo/util/procparser_test.cpp b/src/mongo/util/procparser_test.cpp
index 0afd85726c1..1fba705f929 100644
--- a/src/mongo/util/procparser_test.cpp
+++ b/src/mongo/util/procparser_test.cpp
@@ -208,7 +208,12 @@ TEST(FTDCProcStat, TestStat) {
// otherwise.
TEST(FTDCProcStat, TestLocalStat) {
std::vector<StringData> keys{
- "btime", "cpu", "ctxt", "processes", "procs_blocked", "procs_running",
+ "btime",
+ "cpu",
+ "ctxt",
+ "processes",
+ "procs_blocked",
+ "procs_running",
};
BSONObjBuilder builder;
@@ -237,7 +242,12 @@ TEST(FTDCProcStat, TestLocalStat) {
TEST(FTDCProcStat, TestLocalNonExistentStat) {
std::vector<StringData> keys{
- "btime", "cpu", "ctxt", "processes", "procs_blocked", "procs_running",
+ "btime",
+ "cpu",
+ "ctxt",
+ "processes",
+ "procs_blocked",
+ "procs_running",
};
BSONObjBuilder builder;
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index c103515d19f..05b39eff7db 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -336,8 +336,7 @@ public:
explicit Waiter(ProducerState& x, size_t wants) : _x(x) {
uassert(ErrorCodes::ProducerConsumerQueueProducerQueueDepthExceeded,
str::stream() << "ProducerConsumerQueue producer queue depth exceeded, "
- << (_x._producerQueueDepth + wants)
- << " > "
+ << (_x._producerQueueDepth + wants) << " > "
<< _x._maxProducerQueueDepth,
_x._maxProducerQueueDepth == std::numeric_limits<size_t>::max() ||
_x._producerQueueDepth + wants <= _x._maxProducerQueueDepth);
@@ -473,8 +472,7 @@ public:
auto cost = _invokeCostFunc(t, lk);
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of item (" << cost
- << ") larger than maximum queue size ("
- << _options.maxQueueDepth
+ << ") larger than maximum queue size (" << _options.maxQueueDepth
<< ")",
cost <= _options.maxQueueDepth);
@@ -506,8 +504,7 @@ public:
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of items in batch (" << cost
- << ") larger than maximum queue size ("
- << _options.maxQueueDepth
+ << ") larger than maximum queue size (" << _options.maxQueueDepth
<< ")",
cost <= _options.maxQueueDepth);
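
The two producer-side hunks above guard admission to the queue: a producer may not exceed the configured producer queue depth, and no single item or batch may cost more than the whole queue can ever hold. The checks as plain booleans (the real code raises uassert errors carrying the messages shown):

    #include <cstddef>
    #include <limits>

    bool producerMayEnqueue(size_t queueDepth, size_t wants, size_t maxProducerDepth) {
        return maxProducerDepth == std::numeric_limits<size_t>::max() ||
               queueDepth + wants <= maxProducerDepth;
    }

    bool itemCanEverFit(size_t cost, size_t maxQueueDepth) {
        return cost <= maxQueueDepth;  // a costlier item would block forever
    }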
diff --git a/src/mongo/util/producer_consumer_queue_test.cpp b/src/mongo/util/producer_consumer_queue_test.cpp
index 34ff9227a8d..ba39482d0d0 100644
--- a/src/mongo/util/producer_consumer_queue_test.cpp
+++ b/src/mongo/util/producer_consumer_queue_test.cpp
@@ -861,7 +861,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(pipeProducerEndClosesAfterProducersLeave,
ASSERT_EQUALS(consumer.pop(), MoveOnly(2));
auto thread3 =
- helper.runThread("Producer3", [producer = std::move(producer)](OperationContext * opCtx) {
+ helper.runThread("Producer3", [producer = std::move(producer)](OperationContext* opCtx) {
producer.push(MoveOnly(3), opCtx);
});
@@ -882,7 +882,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(pipeConsumerEndClosesAfterConsumersLeave,
helper.runThread("Consumer2", [consumer](OperationContext* opCtx) { consumer.pop(opCtx); });
auto thread3 =
- helper.runThread("Consumer3", [consumer = std::move(consumer)](OperationContext * opCtx) {
+ helper.runThread("Consumer3", [consumer = std::move(consumer)](OperationContext* opCtx) {
consumer.pop(opCtx);
});
diff --git a/src/mongo/util/progress_meter.cpp b/src/mongo/util/progress_meter.cpp
index 6287cbd7552..1869eca8a95 100644
--- a/src/mongo/util/progress_meter.cpp
+++ b/src/mongo/util/progress_meter.cpp
@@ -100,4 +100,4 @@ std::string ProgressMeter::toString() const {
return buf.str();
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/progress_meter.h b/src/mongo/util/progress_meter.h
index c666c0e90a2..35b3305f94a 100644
--- a/src/mongo/util/progress_meter.h
+++ b/src/mongo/util/progress_meter.h
@@ -168,4 +168,4 @@ public:
private:
ProgressMeter* _pm;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 63321eeb926..c3a56d4db21 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -278,4 +278,4 @@ private:
stdx::condition_variable _cvNoLongerFull;
stdx::condition_variable _cvNoLongerEmpty;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/regex_util.cpp b/src/mongo/util/regex_util.cpp
index 1a596f5375c..2338e32f0c3 100644
--- a/src/mongo/util/regex_util.cpp
+++ b/src/mongo/util/regex_util.cpp
@@ -56,13 +56,13 @@ pcrecpp::RE_Options flagsToPcreOptions(StringData optionFlags,
continue;
default:
if (!ignoreInvalidFlags) {
- uasserted(
- 51108,
- str::stream() << opName << " invalid flag in regex options: " << flag);
+ uasserted(51108,
+ str::stream()
+ << opName << " invalid flag in regex options: " << flag);
}
}
}
return opt;
}
-}
-}
+} // namespace regex_util
+} // namespace mongo
diff --git a/src/mongo/util/regex_util.h b/src/mongo/util/regex_util.h
index 9be72ba94d7..f187c8eddfc 100644
--- a/src/mongo/util/regex_util.h
+++ b/src/mongo/util/regex_util.h
@@ -42,5 +42,5 @@ namespace regex_util {
pcrecpp::RE_Options flagsToPcreOptions(StringData optionFlags,
bool ignoreInvalidOptions,
StringData opName = "");
-}
-}
+} // namespace regex_util
+} // namespace mongo
diff --git a/src/mongo/util/safe_num.h b/src/mongo/util/safe_num.h
index 529adec4878..7f16cd036f3 100644
--- a/src/mongo/util/safe_num.h
+++ b/src/mongo/util/safe_num.h
@@ -40,7 +40,7 @@ namespace mongo {
namespace mutablebson {
class Element;
class Document;
-}
+} // namespace mutablebson
/**
* SafeNum holds and does arithmetic on a number in a safe way, handling overflow
diff --git a/src/mongo/util/safe_num_test.cpp b/src/mongo/util/safe_num_test.cpp
index 426c4d2809e..7fb581b786f 100644
--- a/src/mongo/util/safe_num_test.cpp
+++ b/src/mongo/util/safe_num_test.cpp
@@ -40,8 +40,8 @@
namespace {
-using mongo::SafeNum;
using mongo::Decimal128;
+using mongo::SafeNum;
TEST(Basics, Initialization) {
const SafeNum numInt(0);
diff --git a/src/mongo/util/scopeguard.h b/src/mongo/util/scopeguard.h
index bf2e571f8d0..6ba256f6eeb 100644
--- a/src/mongo/util/scopeguard.h
+++ b/src/mongo/util/scopeguard.h
@@ -37,7 +37,7 @@
namespace mongo {
template <typename F>
-class[[nodiscard]] ScopeGuard {
+class [[nodiscard]] ScopeGuard {
public:
template <typename FuncArg>
explicit ScopeGuard(FuncArg && f) : _func(std::forward<FuncArg>(f)) {}
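
The change above is only whitespace around [[nodiscard]], but the attribute is what makes the guard safe to use: an unnamed temporary would run its cleanup immediately, and [[nodiscard]] turns that mistake into a compiler warning. A self-contained sketch of the shape (makeGuardSketch is a stand-in for the makeGuard seen elsewhere in this diff):

    #include <utility>

    template <typename F>
    class [[nodiscard]] GuardSketch {
    public:
        explicit GuardSketch(F f) : _f(std::move(f)) {}
        GuardSketch(const GuardSketch&) = delete;
        GuardSketch& operator=(const GuardSketch&) = delete;
        ~GuardSketch() {
            _f();  // cleanup runs exactly once, at scope exit
        }

    private:
        F _f;
    };

    template <typename F>
    GuardSketch<F> makeGuardSketch(F f) {
        return GuardSketch<F>(std::move(f));  // guaranteed elision in C++17
    }

    // auto guard = makeGuardSketch([&] { /* release the resource */ });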
diff --git a/src/mongo/util/shared_buffer.h b/src/mongo/util/shared_buffer.h
index c026f92f848..83a9f02bc2b 100644
--- a/src/mongo/util/shared_buffer.h
+++ b/src/mongo/util/shared_buffer.h
@@ -228,4 +228,4 @@ private:
inline void swap(ConstSharedBuffer& one, ConstSharedBuffer& two) {
one.swap(two);
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp
index 2f361b363e5..f4ff57e8238 100644
--- a/src/mongo/util/signal_handlers.cpp
+++ b/src/mongo/util/signal_handlers.cpp
@@ -65,7 +65,7 @@ const char* strsignal(int signalNum) {
return "UNKNOWN";
}
}
-}
+} // namespace
#endif
namespace mongo {
diff --git a/src/mongo/util/signal_win32.cpp b/src/mongo/util/signal_win32.cpp
index 3f4163c514a..e4e51e4b19f 100644
--- a/src/mongo/util/signal_win32.cpp
+++ b/src/mongo/util/signal_win32.cpp
@@ -42,4 +42,4 @@ std::string getShutdownSignalName(int processId) {
return str::stream() << strEventNamePrefix << processId;
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/signal_win32.h b/src/mongo/util/signal_win32.h
index d05bde04899..1127e549a1f 100644
--- a/src/mongo/util/signal_win32.h
+++ b/src/mongo/util/signal_win32.h
@@ -37,4 +37,4 @@ namespace mongo {
// Generate windows event name for shutdown signal
std::string getShutdownSignalName(int processId);
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/stack_introspect.h b/src/mongo/util/stack_introspect.h
index 9087711ad69..9f431160332 100644
--- a/src/mongo/util/stack_introspect.h
+++ b/src/mongo/util/stack_introspect.h
@@ -44,4 +44,4 @@ bool inConstructorChain(bool printOffending = false);
* @return if supported on platform, compile options may still prevent it from working
*/
bool inConstructorChainSupported();
-}
+} // namespace mongo
diff --git a/src/mongo/util/stacktrace_posix.cpp b/src/mongo/util/stacktrace_posix.cpp
index d9d63b89109..29775d035b1 100644
--- a/src/mongo/util/stacktrace_posix.cpp
+++ b/src/mongo/util/stacktrace_posix.cpp
@@ -543,12 +543,12 @@ void addOSComponentsToSoMap(BSONObjBuilder* soMap) {
}
}
}
-} // namepace
+} // namespace
} // namespace mongo
#else
namespace mongo {
namespace {
void addOSComponentsToSoMap(BSONObjBuilder* soMap) {}
-} // namepace
+} // namespace
} // namespace mongo
#endif
diff --git a/src/mongo/util/stacktrace_unwind.cpp b/src/mongo/util/stacktrace_unwind.cpp
index a70667e45bf..c5aff514880 100644
--- a/src/mongo/util/stacktrace_unwind.cpp
+++ b/src/mongo/util/stacktrace_unwind.cpp
@@ -596,12 +596,12 @@ void addOSComponentsToSoMap(BSONObjBuilder* soMap) {
}
}
}
-} // namepace
+} // namespace
} // namespace mongo
#else
namespace mongo {
namespace {
void addOSComponentsToSoMap(BSONObjBuilder* soMap) {}
-} // namepace
+} // namespace
} // namespace mongo
#endif
diff --git a/src/mongo/util/stacktrace_windows.cpp b/src/mongo/util/stacktrace_windows.cpp
index 6aef676a4b2..06f401c6b8c 100644
--- a/src/mongo/util/stacktrace_windows.cpp
+++ b/src/mongo/util/stacktrace_windows.cpp
@@ -356,4 +356,4 @@ int crtDebugCallback(int, char* originalMessage, int*) {
log() << "*** C runtime error: " << message.substr(0, message.find('\n')) << ", terminating";
fassertFailed(17006);
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/string_map_test.cpp b/src/mongo/util/string_map_test.cpp
index b244313db6b..24bae71587c 100644
--- a/src/mongo/util/string_map_test.cpp
+++ b/src/mongo/util/string_map_test.cpp
@@ -212,11 +212,14 @@ TEST(StringMapTest, Assign) {
TEST(StringMapTest, InitWithInitializerList) {
StringMap<int> smap{
- {"q", 1}, {"coollog", 2}, {"mango", 3}, {"mango", 4},
+ {"q", 1},
+ {"coollog", 2},
+ {"mango", 3},
+ {"mango", 4},
};
ASSERT_EQ(1, smap["q"]);
ASSERT_EQ(2, smap["coollog"]);
ASSERT_EQ(3, smap["mango"]);
}
-}
+} // namespace
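
The reformatted initializer list above contains a duplicate key, and the assertion right after it shows that the first mapping wins (smap["mango"] is 3, not 4). Standard maps behave the same way: initializer-list construction inserts element by element, and a later duplicate key is ignored. A checkable analogue:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    int main() {
        std::unordered_map<std::string, int> m{
            {"mango", 3},
            {"mango", 4},  // ignored: the key is already present
        };
        assert(m["mango"] == 3);
        return 0;
    }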
diff --git a/src/mongo/util/summation_test.cpp b/src/mongo/util/summation_test.cpp
index 1fd8a632640..72b29cc47de 100644
--- a/src/mongo/util/summation_test.cpp
+++ b/src/mongo/util/summation_test.cpp
@@ -41,41 +41,41 @@ namespace mongo {
namespace {
using limits = std::numeric_limits<long long>;
-std::vector<long long> longValues = {
- limits::min(),
- limits::min() + 1,
- limits::min() / 2,
- -(1LL << 53),
- -(1LL << 52),
- -(1LL << 32),
- -0x100,
- -0xff,
- -0xaa,
- -0x55,
- -1,
- 0,
- 1,
- 2,
- 0x55,
- 0x80,
- 0xaa,
- 0x100,
- 512,
- 1024,
- 2048,
- 1LL << 31,
- 1LL << 32,
- 1LL << 52,
- 1LL << 53,
- limits::max() / 2,
+std::vector<long long> longValues = {limits::min(),
+ limits::min() + 1,
+ limits::min() / 2,
+ -(1LL << 53),
+ -(1LL << 52),
+ -(1LL << 32),
+ -0x100,
+ -0xff,
+ -0xaa,
+ -0x55,
+ -1,
+ 0,
+ 1,
+ 2,
+ 0x55,
+ 0x80,
+ 0xaa,
+ 0x100,
+ 512,
+ 1024,
+ 2048,
+ 1LL << 31,
+ 1LL << 32,
+ 1LL << 52,
+ 1LL << 53,
+ limits::max() / 2,
#pragma warning(push)
// C4308: negative integral constant converted to unsigned type
#pragma warning(disable : 4308)
- static_cast<long long>(1ULL << 63) - (1ULL << (63 - 53 - 1)), // Halfway between two doubles
+ static_cast<long long>(1ULL << 63) -
+ (1ULL << (63 - 53 - 1)), // Halfway between two doubles
#pragma warning(pop)
- limits::max() - 1,
- limits::max()};
+ limits::max() - 1,
+ limits::max()};
std::vector<double> doubleValues = {
1.4831356930199802e-05, -3.121724665346865, 3041897608700.073, 1001318343149.7166,
diff --git a/src/mongo/util/tcmalloc_set_parameter.cpp b/src/mongo/util/tcmalloc_set_parameter.cpp
index b7dd65fe752..87022f520da 100644
--- a/src/mongo/util/tcmalloc_set_parameter.cpp
+++ b/src/mongo/util/tcmalloc_set_parameter.cpp
@@ -75,18 +75,16 @@ StatusWith<size_t> validateTCMallocValue(StringData name, const BSONElement& new
return {ErrorCodes::TypeMismatch,
str::stream() << "Expected server parameter " << name
<< " to have numeric type, but found "
- << newValueElement.toString(false)
- << " of type "
+ << newValueElement.toString(false) << " of type "
<< typeName(newValueElement.type())};
}
long long valueAsLongLong = newValueElement.safeNumberLong();
if (valueAsLongLong < 0 ||
static_cast<unsigned long long>(valueAsLongLong) > std::numeric_limits<size_t>::max()) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Value " << newValueElement.toString(false) << " is out of range for "
- << name
- << "; expected a value between 0 and "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value " << newValueElement.toString(false) << " is out of range for "
+ << name << "; expected a value between 0 and "
<< std::min<unsigned long long>(std::numeric_limits<size_t>::max(),
std::numeric_limits<long long>::max()));
}
diff --git a/src/mongo/util/text.cpp b/src/mongo/util/text.cpp
index f01ed7797a4..be0e18eea3a 100644
--- a/src/mongo/util/text.cpp
+++ b/src/mongo/util/text.cpp
@@ -184,7 +184,7 @@ std::wstring toWideString(const char* utf8String) {
-1, // Count, -1 for NUL-terminated
nullptr, // No output buffer
0 // Zero means "compute required size"
- );
+ );
if (bufferSize == 0) {
return std::wstring();
}
@@ -196,7 +196,7 @@ std::wstring toWideString(const char* utf8String) {
-1, // Count, -1 for NUL-terminated
tempBuffer.get(), // UTF-16 output buffer
bufferSize // Buffer size in wide characters
- );
+ );
return std::wstring(tempBuffer.get());
}
@@ -214,7 +214,7 @@ bool writeUtf8ToWindowsConsole(const char* utf8String, unsigned int utf8StringSi
utf8StringSize, // Input string length
nullptr, // No output buffer
0 // Zero means "compute required size"
- );
+ );
if (bufferSize == 0) {
return true;
}
@@ -225,7 +225,7 @@ bool writeUtf8ToWindowsConsole(const char* utf8String, unsigned int utf8StringSi
utf8StringSize, // Input string length
utf16String.get(), // UTF-16 output buffer
bufferSize // Buffer size in wide characters
- );
+ );
const wchar_t* utf16Pointer = utf16String.get();
size_t numberOfCharactersToWrite = bufferSize;
HANDLE consoleHandle = GetStdHandle(STD_OUTPUT_HANDLE);
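
The text.cpp hunks above only move the closing parenthesis of the standard Win32 two-call conversion: the first MultiByteToWideChar call computes the required buffer size, the second performs the conversion. A Windows-only sketch with error handling reduced to the empty-string case:

    #include <windows.h>
    #include <memory>
    #include <string>

    std::wstring utf8ToWide(const char* utf8) {
        // Passing -1 for the length treats the input as NUL-terminated and
        // makes the returned size include the terminating NUL.
        const int size = ::MultiByteToWideChar(CP_UTF8, 0, utf8, -1, nullptr, 0);
        if (size == 0)
            return std::wstring();
        auto buffer = std::make_unique<wchar_t[]>(size);
        ::MultiByteToWideChar(CP_UTF8, 0, utf8, -1, buffer.get(), size);
        return std::wstring(buffer.get());
    }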
diff --git a/src/mongo/util/tick_source_test.cpp b/src/mongo/util/tick_source_test.cpp
index 78ea3dac678..aef28a7e97c 100644
--- a/src/mongo/util/tick_source_test.cpp
+++ b/src/mongo/util/tick_source_test.cpp
@@ -52,5 +52,5 @@ TEST(TickSourceTest, TicksToDurationConversion) {
tsMicros.reset(1);
ASSERT_EQ(tsMicros.ticksTo<Microseconds>(tsMicros.getTicks()).count(), 1);
}
-}
+} // namespace
} // namespace mongo
diff --git a/src/mongo/util/unique_function_test.cpp b/src/mongo/util/unique_function_test.cpp
index 66f10ef01e7..8689951a7cb 100644
--- a/src/mongo/util/unique_function_test.cpp
+++ b/src/mongo/util/unique_function_test.cpp
@@ -141,7 +141,7 @@ TEST(UniqueFunctionTest, reassign_simple_unique_function_from_lambda) {
TEST(UniqueFunctionTest, accepts_a_functor_that_is_move_only) {
struct Checker {};
- mongo::unique_function<void()> uf = [checkerPtr = std::make_unique<Checker>()]{};
+ mongo::unique_function<void()> uf = [checkerPtr = std::make_unique<Checker>()] {};
mongo::unique_function<void()> uf2 = std::move(uf);
diff --git a/src/mongo/util/unowned_ptr_test.cpp b/src/mongo/util/unowned_ptr_test.cpp
index 4a6ec0b1d34..b6acea486f6 100644
--- a/src/mongo/util/unowned_ptr_test.cpp
+++ b/src/mongo/util/unowned_ptr_test.cpp
@@ -155,4 +155,4 @@ TEST(UnownedPtr, Equality) {
ASSERT_NE(unowned_ptr<int>(), unowned_ptr<int>(&i)); // NULL != non-NULL
ASSERT_NE(unowned_ptr<int>(&i), unowned_ptr<int>(&j)); // two distinct non-NULLs
}
-}
+} // namespace mongo
diff --git a/src/mongo/watchdog/watchdog_mongod.h b/src/mongo/watchdog/watchdog_mongod.h
index 186e21e4a47..06892de6543 100644
--- a/src/mongo/watchdog/watchdog_mongod.h
+++ b/src/mongo/watchdog/watchdog_mongod.h
@@ -34,8 +34,8 @@
namespace mongo {
/**
-* Start the watchdog.
-*/
+ * Start the watchdog.
+ */
void startWatchdog();
/**